diff --git a/bridges/bin/millau/node/src/chain_spec.rs b/bridges/bin/millau/node/src/chain_spec.rs
index ca47b39018d6fc0407ce7b39325ef3e207a2d4f7..c32291fb385814cfc0cf7eaa63c73e46b58de442 100644
--- a/bridges/bin/millau/node/src/chain_spec.rs
+++ b/bridges/bin/millau/node/src/chain_spec.rs
@@ -16,8 +16,9 @@
 
 use bp_millau::derive_account_from_rialto_id;
 use millau_runtime::{
-	AccountId, AuraConfig, BalancesConfig, BridgeRialtoMessagesConfig, BridgeWestendGrandpaConfig, GenesisConfig,
-	GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY,
+	AccountId, AuraConfig, BalancesConfig, BridgeRialtoMessagesConfig, BridgeWestendGrandpaConfig,
+	GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig,
+	WASM_BINARY,
 };
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 use sp_core::{sr25519, Pair, Public};
@@ -190,12 +191,8 @@ fn testnet_genesis(
 		balances: BalancesConfig {
 			balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
 		},
-		aura: AuraConfig {
-			authorities: Vec::new(),
-		},
-		grandpa: GrandpaConfig {
-			authorities: Vec::new(),
-		},
+		aura: AuraConfig { authorities: Vec::new() },
+		grandpa: GrandpaConfig { authorities: Vec::new() },
 		sudo: SudoConfig { key: root_key },
 		session: SessionConfig {
 			keys: initial_authorities
@@ -220,9 +217,7 @@ fn testnet_genesis(
 #[test]
 fn derived_dave_account_is_as_expected() {
 	let dave = get_account_id_from_seed::<sr25519::Public>("Dave");
-	let derived: AccountId = derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave));
-	assert_eq!(
-		derived.to_string(),
-		"5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string()
-	);
+	let derived: AccountId =
+		derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave));
+	assert_eq!(derived.to_string(), "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string());
 }
diff --git a/bridges/bin/millau/node/src/command.rs b/bridges/bin/millau/node/src/command.rs
index 61786452e03e7b8d00b710423e5f0e7f2be5a126..4285ecaced5161eda614a03e22c6e3ebc6f478d8 100644
--- a/bridges/bin/millau/node/src/command.rs
+++ b/bridges/bin/millau/node/src/command.rs
@@ -14,9 +14,11 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{Cli, Subcommand};
-use crate::service;
-use crate::service::new_partial;
+use crate::{
+	cli::{Cli, Subcommand},
+	service,
+	service::new_partial,
+};
 use millau_runtime::{Block, RuntimeApi};
 use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
 use sc_service::PartialComponents;
@@ -75,7 +77,7 @@ pub fn run() -> sc_cli::Result<()> {
 	));
 
 	match &cli.subcommand {
-		Some(Subcommand::Benchmark(cmd)) => {
+		Some(Subcommand::Benchmark(cmd)) =>
 			if cfg!(feature = "runtime-benchmarks") {
 				let runner = cli.create_runner(cmd)?;
 
@@ -86,8 +88,7 @@ pub fn run() -> sc_cli::Result<()> {
 				You can enable it with `--features runtime-benchmarks`."
 				);
 				Ok(())
-			}
-		}
+			},
 		Some(Subcommand::Key(cmd)) => cmd.run(&cli),
 		Some(Subcommand::Sign(cmd)) => cmd.run(),
 		Some(Subcommand::Verify(cmd)) => cmd.run(),
@@ -95,69 +96,53 @@ pub fn run() -> sc_cli::Result<()> {
 		Some(Subcommand::BuildSpec(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
-		}
+		},
 		Some(Subcommand::CheckBlock(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&config)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = new_partial(&config)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = new_partial(&config)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&config)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::PurgeChain(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.database))
-		}
+		},
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					backend,
-					..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, backend, .. } = new_partial(&config)?;
 				Ok((cmd.run(client, backend), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::Inspect(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.sync_run(|config| cmd.run::<Block, RuntimeApi, service::ExecutorDispatch>(config))
-		}
+			runner
+				.sync_run(|config| cmd.run::<Block, RuntimeApi, service::ExecutorDispatch>(config))
+		},
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 			runner.run_node_until_exit(|config| async move {
@@ -167,6 +152,6 @@ pub fn run() -> sc_cli::Result<()> {
 				}
 				.map_err(sc_cli::Error::Service)
 			})
-		}
+		},
 	}
 }
diff --git a/bridges/bin/millau/node/src/service.rs b/bridges/bin/millau/node/src/service.rs
index 3e8d16f596075060e1ea178e6444c913354fd5e7..ce9cfc10ef1017bfbbedcf46498289a9ddfc96dd 100644
--- a/bridges/bin/millau/node/src/service.rs
+++ b/bridges/bin/millau/node/src/service.rs
@@ -21,8 +21,8 @@
 // =====================================================================================
 // UPDATE GUIDE:
 // 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo);
-// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs;
-// 3) fix compilation errors;
+// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom
+// RPCs; 3) fix compilation errors;
 // 4) test :)
 // =====================================================================================
 // =====================================================================================
@@ -57,7 +57,8 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
 	}
 }
 
-type FullClient = sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
+type FullClient =
+	sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
 type FullBackend = sc_service::TFullBackend<Block>;
 type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
 
@@ -72,7 +73,12 @@ pub fn new_partial(
 		sc_consensus::DefaultImportQueue<Block, FullClient>,
 		sc_transaction_pool::FullPool<Block, FullClient>,
 		(
-			sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
+			sc_finality_grandpa::GrandpaBlockImport<
+				FullBackend,
+				Block,
+				FullClient,
+				FullSelectChain,
+			>,
 			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
 			Option<Telemetry>,
 		),
@@ -80,7 +86,7 @@ pub fn new_partial(
 	ServiceError,
 > {
 	if config.keystore_remote.is_some() {
-		return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()));
+		return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()))
 	}
 
 	let telemetry = config
@@ -100,11 +106,12 @@ pub fn new_partial(
 		config.max_runtime_instances,
 	);
 
-	let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
-		config,
-		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-		executor,
-	)?;
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
 	let client = Arc::new(client);
 
 	let telemetry = telemetry.map(|(worker, telemetry)| {
@@ -131,26 +138,30 @@ pub fn new_partial(
 
 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
 
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
-		block_import: grandpa_block_import.clone(),
-		justification_import: Some(Box::new(grandpa_block_import.clone())),
-		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
+			block_import: grandpa_block_import.clone(),
+			justification_import: Some(Box::new(grandpa_block_import.clone())),
+			client: client.clone(),
+			create_inherent_data_providers: move |_, ()| async move {
+				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
-			);
-
-			Ok((timestamp, slot))
-		},
-		spawner: &task_manager.spawn_essential_handle(),
-		can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
-		registry: config.prometheus_registry(),
-		check_for_equivocation: Default::default(),
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-	})?;
+				let slot =
+					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+						*timestamp,
+						slot_duration,
+					);
+
+				Ok((timestamp, slot))
+			},
+			spawner: &task_manager.spawn_essential_handle(),
+			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(
+				client.executor().clone(),
+			),
+			registry: config.prometheus_registry(),
+			check_for_equivocation: Default::default(),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+		})?;
 
 	Ok(sc_service::PartialComponents {
 		client,
@@ -187,37 +198,39 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 	if let Some(url) = &config.keystore_remote {
 		match remote_keystore(url) {
 			Ok(k) => keystore_container.set_remote_keystore(k),
-			Err(e) => {
+			Err(e) =>
 				return Err(ServiceError::Other(format!(
 					"Error hooking up remote keystore for {}: {}",
 					url, e
-				)))
-			}
+				))),
 		};
 	}
 
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
+	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
 	let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
 		backend.clone(),
 		grandpa_link.shared_authority_set().clone(),
 	));
 
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: None,
-		block_announce_validator_builder: None,
-		warp_sync: Some(warp_sync),
-	})?;
+	let (network, system_rpc_tx, network_starter) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue,
+			on_demand: None,
+			block_announce_validator_builder: None,
+			warp_sync: Some(warp_sync),
+		})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
+		sc_service::build_offchain_workers(
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
+		);
 	}
 
 	let role = config.role.clone();
@@ -244,8 +257,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		let shared_authority_set = grandpa_link.shared_authority_set().clone();
 		let shared_voter_state = shared_voter_state.clone();
 
-		let finality_proof_provider =
-			GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone()));
+		let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
+			backend,
+			Some(shared_authority_set.clone()),
+		);
 
 		Box::new(move |_, subscription_executor| {
 			let mut io = jsonrpc_core::IoHandler::default();
@@ -292,37 +307,40 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 			telemetry.as_ref().map(|x| x.handle()),
 		);
 
-		let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
+		let can_author_with =
+			sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
 
 		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
 		let raw_slot_duration = slot_duration.slot_duration();
 
-		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
-			slot_duration,
-			client,
-			select_chain,
-			block_import,
-			proposer_factory,
-			create_inherent_data_providers: move |_, ()| async move {
-				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-				let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(
+			StartAuraParams {
+				slot_duration,
+				client,
+				select_chain,
+				block_import,
+				proposer_factory,
+				create_inherent_data_providers: move |_, ()| async move {
+					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+					let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
 					*timestamp,
 					raw_slot_duration,
 				);
 
-				Ok((timestamp, slot))
+					Ok((timestamp, slot))
+				},
+				force_authoring,
+				backoff_authoring_blocks,
+				keystore: keystore_container.sync_keystore(),
+				can_author_with,
+				sync_oracle: network.clone(),
+				justification_sync_link: network.clone(),
+				block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
+				max_block_proposal_slot_portion: None,
+				telemetry: telemetry.as_ref().map(|x| x.handle()),
 			},
-			force_authoring,
-			backoff_authoring_blocks,
-			keystore: keystore_container.sync_keystore(),
-			can_author_with,
-			sync_oracle: network.clone(),
-			justification_sync_link: network.clone(),
-			block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
-			max_block_proposal_slot_portion: None,
-			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		})?;
+		)?;
 
 		// the AURA authoring task is considered essential, i.e. if it
 		// fails we take down the service with it.
@@ -331,11 +349,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 	// if the node isn't actively participating in consensus then it doesn't
 	// need a keystore, regardless of which protocol we use below.
-	let keystore = if role.is_authority() {
-		Some(keystore_container.sync_keystore())
-	} else {
-		None
-	};
+	let keystore =
+		if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };
 
 	let grandpa_config = sc_finality_grandpa::Config {
 		// FIXME #1578 make this available through chainspec
@@ -367,9 +382,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 		// the GRANDPA voter task is considered infallible, i.e.
 		// if it fails we take down the service with it.
-		task_manager
-			.spawn_essential_handle()
-			.spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?);
+		task_manager.spawn_essential_handle().spawn_blocking(
+			"grandpa-voter",
+			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
+		);
 	}
 
 	network_starter.start_network();
@@ -407,10 +423,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		telemetry
 	});
 
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
+	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
 
 	let select_chain = sc_consensus::LongestChain::new(backend.clone());
 
@@ -431,45 +444,53 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
 
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
-		block_import: grandpa_block_import.clone(),
-		justification_import: Some(Box::new(grandpa_block_import)),
-		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
+			block_import: grandpa_block_import.clone(),
+			justification_import: Some(Box::new(grandpa_block_import)),
+			client: client.clone(),
+			create_inherent_data_providers: move |_, ()| async move {
+				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
-			);
-
-			Ok((timestamp, slot))
-		},
-		spawner: &task_manager.spawn_essential_handle(),
-		can_author_with: sp_consensus::NeverCanAuthor,
-		registry: config.prometheus_registry(),
-		check_for_equivocation: Default::default(),
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-	})?;
+				let slot =
+					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+						*timestamp,
+						slot_duration,
+					);
+
+				Ok((timestamp, slot))
+			},
+			spawner: &task_manager.spawn_essential_handle(),
+			can_author_with: sp_consensus::NeverCanAuthor,
+			registry: config.prometheus_registry(),
+			check_for_equivocation: Default::default(),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+		})?;
 
 	let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
 		backend.clone(),
 		grandpa_link.shared_authority_set().clone(),
 	));
 
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: Some(on_demand.clone()),
-		block_announce_validator_builder: None,
-		warp_sync: Some(warp_sync),
-	})?;
+	let (network, system_rpc_tx, network_starter) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue,
+			on_demand: Some(on_demand.clone()),
+			block_announce_validator_builder: None,
+			warp_sync: Some(warp_sync),
+		})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
+		sc_service::build_offchain_workers(
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
+		);
 	}
 
 	let enable_grandpa = !config.disable_grandpa;
diff --git a/bridges/bin/millau/runtime/src/lib.rs b/bridges/bin/millau/runtime/src/lib.rs
index ce3ca28d399847fffb29f048aead2b5e7ec58da3..698ad1e580465db887ff851de4bdc592a4beb672 100644
--- a/bridges/bin/millau/runtime/src/lib.rs
+++ b/bridges/bin/millau/runtime/src/lib.rs
@@ -34,15 +34,19 @@ pub mod rialto_messages;
 
 use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge};
 
-use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge};
-use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
+use bridge_runtime_common::messages::{
+	source::estimate_message_dispatch_and_delivery_fee, MessageBridge,
+};
+use pallet_grandpa::{
+	fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
+};
 use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo};
 use sp_api::impl_runtime_apis;
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
-use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys};
 use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys,
+	traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys},
 	transaction_validity::{TransactionSource, TransactionValidity},
 	ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill,
 };
@@ -61,8 +65,9 @@ pub use frame_support::{
 
 pub use frame_system::Call as SystemCall;
 pub use pallet_balances::Call as BalancesCall;
-pub use pallet_bridge_grandpa::Call as BridgeGrandpaRialtoCall;
-pub use pallet_bridge_grandpa::Call as BridgeGrandpaWestendCall;
+pub use pallet_bridge_grandpa::{
+	Call as BridgeGrandpaRialtoCall, Call as BridgeGrandpaWestendCall,
+};
 pub use pallet_bridge_messages::Call as MessagesCall;
 pub use pallet_sudo::Call as SudoCall;
 pub use pallet_timestamp::Call as TimestampCall;
@@ -138,10 +143,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 /// The version information used to identify this runtime when compiled natively.
 #[cfg(feature = "std")]
 pub fn native_version() -> NativeVersion {
-	NativeVersion {
-		runtime_version: VERSION,
-		can_author_with: Default::default(),
-	}
+	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
 }
 
 parameter_types! {
@@ -231,9 +233,12 @@ impl pallet_grandpa::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
 	type KeyOwnerProofSystem = ();
-	type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
-	type KeyOwnerIdentification =
-		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::IdentificationTuple;
+	type KeyOwnerProof =
+		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
+	type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		GrandpaId,
+	)>>::IdentificationTuple;
 	type HandleEquivocation = ();
 	// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
 	type WeightInfo = ();
@@ -394,14 +399,16 @@ impl pallet_bridge_messages::Config<WithRialtoMessagesInstance> for Runtime {
 
 	type TargetHeaderChain = crate::rialto_messages::Rialto;
 	type LaneMessageVerifier = crate::rialto_messages::ToRialtoMessageVerifier;
-	type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
-		Runtime,
-		pallet_balances::Pallet<Runtime>,
-		GetDeliveryConfirmationTransactionFee,
-		RootAccountForPayments,
-	>;
+	type MessageDeliveryAndDispatchPayment =
+		pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
+			Runtime,
+			pallet_balances::Pallet<Runtime>,
+			GetDeliveryConfirmationTransactionFee,
+			RootAccountForPayments,
+		>;
 	type OnMessageAccepted = ();
-	type OnDeliveryConfirmed = pallet_bridge_token_swap::Pallet<Runtime, WithRialtoTokenSwapInstance>;
+	type OnDeliveryConfirmed =
+		pallet_bridge_token_swap::Pallet<Runtime, WithRialtoTokenSwapInstance>;
 
 	type SourceHeaderChain = crate::rialto_messages::Rialto;
 	type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch;
@@ -488,8 +495,13 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signatu
 /// Extrinsic type that has already been checked.
 pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
 /// Executive: handles dispatch to the various modules.
-pub type Executive =
-	frame_executive::Executive<Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllPallets>;
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllPallets,
+>;
 
 impl_runtime_apis! {
 	impl sp_api::Core<Block> for Runtime {
@@ -742,15 +754,18 @@ mod tests {
 			bp_millau::max_extrinsic_size(),
 			bp_millau::max_extrinsic_weight(),
 			max_incoming_message_proof_size,
-			messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()),
+			messages::target::maximal_incoming_message_dispatch_weight(
+				bp_millau::max_extrinsic_weight(),
+			),
 		);
 
-		let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
-			bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
-			bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
-			bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
-		)
-		.unwrap_or(u32::MAX);
+		let max_incoming_inbound_lane_data_proof_size =
+			bp_messages::InboundLaneData::<()>::encoded_size_hint(
+				bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
+				bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
+				bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
+			)
+			.unwrap_or(u32::MAX);
 		pallet_bridge_messages::ensure_able_to_receive_confirmation::<Weights>(
 			bp_millau::max_extrinsic_size(),
 			bp_millau::max_extrinsic_weight(),
diff --git a/bridges/bin/millau/runtime/src/rialto_messages.rs b/bridges/bin/millau/runtime/src/rialto_messages.rs
index 1bc15caa1056591d370a130ea3318bc707d87527..37fc7715f989d8b4896630856ffe6f9403e92499 100644
--- a/bridges/bin/millau/runtime/src/rialto_messages.rs
+++ b/bridges/bin/millau/runtime/src/rialto_messages.rs
@@ -35,7 +35,8 @@ use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128};
 use sp_std::{convert::TryFrom, ops::RangeInclusive};
 
 /// Initial value of `RialtoToMillauConversionRate` parameter.
-pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
+pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 =
+	FixedU128::from_inner(FixedU128::DIV);
 /// Initial value of `RialtoFeeMultiplier` parameter.
 pub const INITIAL_RIALTO_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
 
@@ -47,13 +48,16 @@ parameter_types! {
 }
 
 /// Message payload for Millau -> Rialto messages.
-pub type ToRialtoMessagePayload = messages::source::FromThisChainMessagePayload<WithRialtoMessageBridge>;
+pub type ToRialtoMessagePayload =
+	messages::source::FromThisChainMessagePayload<WithRialtoMessageBridge>;
 
 /// Message verifier for Millau -> Rialto messages.
-pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifier<WithRialtoMessageBridge>;
+pub type ToRialtoMessageVerifier =
+	messages::source::FromThisChainMessageVerifier<WithRialtoMessageBridge>;
 
 /// Message payload for Rialto -> Millau messages.
-pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload<WithRialtoMessageBridge>;
+pub type FromRialtoMessagePayload =
+	messages::target::FromBridgedChainMessagePayload<WithRialtoMessageBridge>;
 
 /// Encoded Millau Call as it comes from Rialto.
 pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<crate::Call>;
@@ -62,7 +66,8 @@ pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessag
 type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof<bp_rialto::Hash>;
 
 /// Messages delivery proof for Millau -> Rialto messages.
-type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof<bp_rialto::Hash>;
+type ToRialtoMessagesDeliveryProof =
+	messages::source::FromBridgedChainMessagesDeliveryProof<bp_rialto::Hash>;
 
 /// Call-dispatch based message dispatch for Rialto -> Millau messages.
 pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch<
@@ -86,8 +91,10 @@ impl MessageBridge for WithRialtoMessageBridge {
 	type BridgedChain = Rialto;
 
 	fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance {
-		bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance))
-			.unwrap_or(bp_millau::Balance::MAX)
+		bp_millau::Balance::try_from(
+			RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance),
+		)
+		.unwrap_or(bp_millau::Balance::MAX)
 	}
 }
 
@@ -108,7 +115,9 @@ impl messages::ThisChainWithMessages for Millau {
 	type Call = crate::Call;
 
 	fn is_outbound_lane_enabled(lane: &LaneId) -> bool {
-		*lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1] || *lane == crate::TokenSwapMessagesLane::get()
+		*lane == [0, 0, 0, 0] ||
+			*lane == [0, 0, 0, 1] ||
+			*lane == crate::TokenSwapMessagesLane::get()
 	}
 
 	fn maximal_pending_messages_at_outbound_lane() -> MessageNonce {
@@ -167,12 +176,15 @@ impl messages::BridgedChainWithMessages for Rialto {
 
 	fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive<Weight> {
 		// we don't want to relay too large messages + keep reserve for future upgrades
-		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight());
+		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(
+			bp_rialto::max_extrinsic_weight(),
+		);
 
-		// we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment` function
+		// we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment`
+		// function
 		//
-		// this bridge may be used to deliver all kind of messages, so we're not making any assumptions about
-		// minimal dispatch weight here
+		// this bridge may be used to deliver all kind of messages, so we're not making any
+		// assumptions about minimal dispatch weight here
 
 		0..=upper_limit
 	}
@@ -232,9 +244,11 @@ impl TargetHeaderChain<ToRialtoMessagePayload, bp_rialto::AccountId> for Rialto
 	fn verify_messages_delivery_proof(
 		proof: Self::MessagesDeliveryProof,
 	) -> Result<(LaneId, InboundLaneData<bp_millau::AccountId>), Self::Error> {
-		messages::source::verify_messages_delivery_proof::<WithRialtoMessageBridge, Runtime, crate::RialtoGrandpaInstance>(
-			proof,
-		)
+		messages::source::verify_messages_delivery_proof::<
+			WithRialtoMessageBridge,
+			Runtime,
+			crate::RialtoGrandpaInstance,
+		>(proof)
 	}
 }
 
@@ -251,10 +265,11 @@ impl SourceHeaderChain<bp_rialto::Balance> for Rialto {
 		proof: Self::MessagesProof,
 		messages_count: u32,
 	) -> Result<ProvedMessages<Message<bp_rialto::Balance>>, Self::Error> {
-		messages::target::verify_messages_proof::<WithRialtoMessageBridge, Runtime, crate::RialtoGrandpaInstance>(
-			proof,
-			messages_count,
-		)
+		messages::target::verify_messages_proof::<
+			WithRialtoMessageBridge,
+			Runtime,
+			crate::RialtoGrandpaInstance,
+		>(proof, messages_count)
 	}
 }
 
@@ -268,9 +283,8 @@ pub enum MillauToRialtoMessagesParameter {
 impl MessagesParameter for MillauToRialtoMessagesParameter {
 	fn save(&self) {
 		match *self {
-			MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => {
-				RialtoToMillauConversionRate::set(conversion_rate)
-			}
+			MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) =>
+				RialtoToMillauConversionRate::set(conversion_rate),
 		}
 	}
 }
diff --git a/bridges/bin/rialto-parachain/node/src/chain_spec.rs b/bridges/bin/rialto-parachain/node/src/chain_spec.rs
index 728c4a0a8ec8e12f32cdc61491a172c2d10dedbc..f93887a21e47918d42c1cc34e8e147be426857c5 100644
--- a/bridges/bin/rialto-parachain/node/src/chain_spec.rs
+++ b/bridges/bin/rialto-parachain/node/src/chain_spec.rs
@@ -23,7 +23,8 @@ use sp_core::{sr25519, Pair, Public};
 use sp_runtime::traits::{IdentifyAccount, Verify};
 
 /// Specialized `ChainSpec` for the normal parachain runtime.
-pub type ChainSpec = sc_service::GenericChainSpec<rialto_parachain_runtime::GenesisConfig, Extensions>;
+pub type ChainSpec =
+	sc_service::GenericChainSpec<rialto_parachain_runtime::GenesisConfig, Extensions>;
 
 /// Helper function to generate a crypto pair from seed
 pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
@@ -157,9 +158,7 @@ fn testnet_genesis(
 		},
 		sudo: rialto_parachain_runtime::SudoConfig { key: root_key },
 		parachain_info: rialto_parachain_runtime::ParachainInfoConfig { parachain_id: id },
-		aura: rialto_parachain_runtime::AuraConfig {
-			authorities: initial_authorities,
-		},
+		aura: rialto_parachain_runtime::AuraConfig { authorities: initial_authorities },
 		aura_ext: Default::default(),
 		// parachain_system: Default::default(),
 	}
diff --git a/bridges/bin/rialto-parachain/node/src/cli.rs b/bridges/bin/rialto-parachain/node/src/cli.rs
index 3a55da24d909233468dca9a85867180d52fa5bd0..bc2238e2fd44e687661964e43c8d9978cfbd8ec8 100644
--- a/bridges/bin/rialto-parachain/node/src/cli.rs
+++ b/bridges/bin/rialto-parachain/node/src/cli.rs
@@ -131,14 +131,7 @@ impl RelayChainCli {
 	) -> Self {
 		let extension = chain_spec::Extensions::try_get(&*para_config.chain_spec);
 		let chain_id = extension.map(|e| e.relay_chain.clone());
-		let base_path = para_config
-			.base_path
-			.as_ref()
-			.map(|x| x.path().join("rialto-bridge-node"));
-		Self {
-			base_path,
-			chain_id,
-			base: polkadot_cli::RunCmd::from_iter(relay_chain_args),
-		}
+		let base_path = para_config.base_path.as_ref().map(|x| x.path().join("rialto-bridge-node"));
+		Self { base_path, chain_id, base: polkadot_cli::RunCmd::from_iter(relay_chain_args) }
 	}
 }
diff --git a/bridges/bin/rialto-parachain/node/src/command.rs b/bridges/bin/rialto-parachain/node/src/command.rs
index a2cffd0fae74ff63fb50bde3d8f01acf3f1f6dcb..eb9aba2c104ba12a39d0fea6e96679cfe0b41c9d 100644
--- a/bridges/bin/rialto-parachain/node/src/command.rs
+++ b/bridges/bin/rialto-parachain/node/src/command.rs
@@ -26,15 +26,18 @@ use log::info;
 use polkadot_parachain::primitives::AccountIdConversion;
 use rialto_parachain_runtime::{Block, RuntimeApi};
 use sc_cli::{
-	ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, NetworkParams, Result,
-	RuntimeVersion, SharedParams, SubstrateCli,
+	ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams,
+	NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli,
 };
 use sc_service::config::{BasePath, PrometheusConfig};
 use sp_core::hexdisplay::HexDisplay;
 use sp_runtime::traits::Block as BlockT;
 use std::{io::Write, net::SocketAddr};
 
-fn load_spec(id: &str, para_id: ParaId) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
+fn load_spec(
+	id: &str,
+	para_id: ParaId,
+) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
 	Ok(match id {
 		"dev" => Box::new(chain_spec::development_config(para_id)),
 		"" | "local" => Box::new(chain_spec::local_testnet_config(para_id)),
@@ -158,44 +161,51 @@ pub fn run() -> Result<()> {
 		Some(Subcommand::BuildSpec(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
-		}
+		},
 		Some(Subcommand::CheckBlock(cmd)) => {
 			construct_async_run!(|components, cli, cmd, config| {
 				Ok(cmd.run(components.client, components.import_queue))
 			})
-		}
+		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| Ok(cmd.run(components.client, config.database)))
-		}
+			construct_async_run!(|components, cli, cmd, config| Ok(
+				cmd.run(components.client, config.database)
+			))
+		},
 		Some(Subcommand::ExportState(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| Ok(cmd.run(components.client, config.chain_spec)))
-		}
+			construct_async_run!(|components, cli, cmd, config| Ok(
+				cmd.run(components.client, config.chain_spec)
+			))
+		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			construct_async_run!(|components, cli, cmd, config| {
 				Ok(cmd.run(components.client, components.import_queue))
 			})
-		}
+		},
 		Some(Subcommand::PurgeChain(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 
 			runner.sync_run(|config| {
 				let polkadot_cli = RelayChainCli::new(
 					&config,
-					[RelayChainCli::executable_name()]
-						.iter()
-						.chain(cli.relaychain_args.iter()),
+					[RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()),
 				);
 
-				let polkadot_config =
-					SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, config.tokio_handle.clone())
-						.map_err(|err| format!("Relay chain argument error: {}", err))?;
+				let polkadot_config = SubstrateCli::create_configuration(
+					&polkadot_cli,
+					&polkadot_cli,
+					config.tokio_handle.clone(),
+				)
+				.map_err(|err| format!("Relay chain argument error: {}", err))?;
 
 				cmd.run(config, polkadot_config)
 			})
-		}
+		},
 		Some(Subcommand::Revert(cmd)) => {
-			construct_async_run!(|components, cli, cmd, config| Ok(cmd.run(components.client, components.backend)))
-		}
+			construct_async_run!(|components, cli, cmd, config| Ok(
+				cmd.run(components.client, components.backend)
+			))
+		},
 		Some(Subcommand::ExportGenesisState(params)) => {
 			let mut builder = sc_cli::LoggerBuilder::new("");
 			builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
@@ -219,13 +229,14 @@ pub fn run() -> Result<()> {
 			}
 
 			Ok(())
-		}
+		},
 		Some(Subcommand::ExportGenesisWasm(params)) => {
 			let mut builder = sc_cli::LoggerBuilder::new("");
 			builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
 			let _ = builder.init();
 
-			let raw_wasm_blob = extract_genesis_wasm(&*cli.load_spec(&params.chain.clone().unwrap_or_default())?)?;
+			let raw_wasm_blob =
+				extract_genesis_wasm(&*cli.load_spec(&params.chain.clone().unwrap_or_default())?)?;
 			let output_buf = if params.raw {
 				raw_wasm_blob
 			} else {
@@ -239,8 +250,8 @@ pub fn run() -> Result<()> {
 			}
 
 			Ok(())
-		}
-		Some(Subcommand::Benchmark(cmd)) => {
+		},
+		Some(Subcommand::Benchmark(cmd)) =>
 			if cfg!(feature = "runtime-benchmarks") {
 				let runner = cli.create_runner(cmd)?;
 
@@ -249,46 +260,46 @@ pub fn run() -> Result<()> {
 				Err("Benchmarking wasn't enabled when building the node. \
 				You can enable it with `--features runtime-benchmarks`."
 					.into())
-			}
-		}
+			},
 		None => {
 			let runner = cli.create_runner(&cli.run.normalize())?;
 
 			runner.run_node_until_exit(|config| async move {
-				let para_id = chain_spec::Extensions::try_get(&*config.chain_spec).map(|e| e.para_id);
+				let para_id =
+					chain_spec::Extensions::try_get(&*config.chain_spec).map(|e| e.para_id);
 
 				let polkadot_cli = RelayChainCli::new(
 					&config,
-					[RelayChainCli::executable_name()]
-						.iter()
-						.chain(cli.relaychain_args.iter()),
+					[RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()),
 				);
 
 				let id = ParaId::from(cli.run.parachain_id.or(para_id).expect("Missing ParaId"));
 
-				let parachain_account = AccountIdConversion::<polkadot_primitives::v0::AccountId>::into_account(&id);
+				let parachain_account =
+					AccountIdConversion::<polkadot_primitives::v0::AccountId>::into_account(&id);
 
-				let block: Block = generate_genesis_block(&config.chain_spec).map_err(|e| format!("{:?}", e))?;
+				let block: Block =
+					generate_genesis_block(&config.chain_spec).map_err(|e| format!("{:?}", e))?;
 				let genesis_state = format!("0x{:?}", HexDisplay::from(&block.header().encode()));
 
-				let polkadot_config =
-					SubstrateCli::create_configuration(&polkadot_cli, &polkadot_cli, config.tokio_handle.clone())
-						.map_err(|err| format!("Relay chain argument error: {}", err))?;
+				let polkadot_config = SubstrateCli::create_configuration(
+					&polkadot_cli,
+					&polkadot_cli,
+					config.tokio_handle.clone(),
+				)
+				.map_err(|err| format!("Relay chain argument error: {}", err))?;
 
 				info!("Parachain id: {:?}", id);
 				info!("Parachain Account: {}", parachain_account);
 				info!("Parachain genesis state: {}", genesis_state);
-				info!(
-					"Is collating: {}",
-					if config.role.is_authority() { "yes" } else { "no" }
-				);
+				info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
 
 				crate::service::start_node(config, polkadot_config, id)
 					.await
 					.map(|r| r.0)
 					.map_err(Into::into)
 			})
-		}
+		},
 	}
 }
 
@@ -357,11 +368,7 @@ impl CliConfiguration<Self> for RelayChainCli {
 	fn chain_id(&self, is_dev: bool) -> Result<String> {
 		let chain_id = self.base.base.chain_id(is_dev)?;
 
-		Ok(if chain_id.is_empty() {
-			self.chain_id.clone().unwrap_or_default()
-		} else {
-			chain_id
-		})
+		Ok(if chain_id.is_empty() { self.chain_id.clone().unwrap_or_default() } else { chain_id })
 	}
 
 	fn role(&self, is_dev: bool) -> Result<sc_service::Role> {
@@ -408,7 +415,10 @@ impl CliConfiguration<Self> for RelayChainCli {
 		self.base.base.announce_block()
 	}
 
-	fn telemetry_endpoints(&self, chain_spec: &Box<dyn ChainSpec>) -> Result<Option<sc_telemetry::TelemetryEndpoints>> {
+	fn telemetry_endpoints(
+		&self,
+		chain_spec: &Box<dyn ChainSpec>,
+	) -> Result<Option<sc_telemetry::TelemetryEndpoints>> {
 		self.base.base.telemetry_endpoints(chain_spec)
 	}
 }
diff --git a/bridges/bin/rialto-parachain/node/src/service.rs b/bridges/bin/rialto-parachain/node/src/service.rs
index cbd10ca2aa218e474645c875ee52137ea94243b5..65a8e7bb65c579e55b42b8d0fdaef3058f0a4b96 100644
--- a/bridges/bin/rialto-parachain/node/src/service.rs
+++ b/bridges/bin/rialto-parachain/node/src/service.rs
@@ -21,7 +21,9 @@ use std::sync::Arc;
 use rialto_parachain_runtime::RuntimeApi;
 
 // Cumulus Imports
-use cumulus_client_consensus_aura::{build_aura_consensus, BuildAuraConsensusParams, SlotProportion};
+use cumulus_client_consensus_aura::{
+	build_aura_consensus, BuildAuraConsensusParams, SlotProportion,
+};
 use cumulus_client_consensus_common::ParachainConsensus;
 use cumulus_client_network::build_block_announce_validator;
 use cumulus_client_service::{
@@ -77,8 +79,14 @@ pub fn new_partial<RuntimeApi, Executor, BIQ>(
 		TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
 		TFullBackend<Block>,
 		(),
-		sc_consensus::DefaultImportQueue<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
-		sc_transaction_pool::FullPool<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		sc_consensus::DefaultImportQueue<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
+		sc_transaction_pool::FullPool<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
 		(Option<Telemetry>, Option<TelemetryWorkerHandle>),
 	>,
 	sc_service::Error,
@@ -91,8 +99,10 @@ where
 	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
 		+ sp_api::Metadata<Block>
 		+ sp_session::SessionKeys<Block>
-		+ sp_api::ApiExt<Block, StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>>
-		+ sp_offchain::OffchainWorkerApi<Block>
+		+ sp_api::ApiExt<
+			Block,
+			StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
+		> + sp_offchain::OffchainWorkerApi<Block>
 		+ sp_block_builder::BlockBuilder<Block>,
 	sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
 	Executor: NativeExecutionDispatch + 'static,
@@ -102,7 +112,10 @@ where
 		Option<TelemetryHandle>,
 		&TaskManager,
 	) -> Result<
-		sc_consensus::DefaultImportQueue<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		sc_consensus::DefaultImportQueue<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
 		sc_service::Error,
 	>,
 {
@@ -123,11 +136,12 @@ where
 		config.max_runtime_instances,
 	);
 
-	let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, _>(
-		config,
-		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-		executor,
-	)?;
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, _>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
 	let client = Arc::new(client);
 
 	let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
@@ -189,8 +203,10 @@ where
 	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
 		+ sp_api::Metadata<Block>
 		+ sp_session::SessionKeys<Block>
-		+ sp_api::ApiExt<Block, StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>>
-		+ sp_offchain::OffchainWorkerApi<Block>
+		+ sp_api::ApiExt<
+			Block,
+			StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
+		> + sp_offchain::OffchainWorkerApi<Block>
 		+ sp_block_builder::BlockBuilder<Block>
 		+ cumulus_primitives_core::CollectCollationInfo<Block>,
 	sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
@@ -206,7 +222,10 @@ where
 		Option<TelemetryHandle>,
 		&TaskManager,
 	) -> Result<
-		sc_consensus::DefaultImportQueue<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		sc_consensus::DefaultImportQueue<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
 		sc_service::Error,
 	>,
 	BIC: FnOnce(
@@ -215,14 +234,19 @@ where
 		Option<TelemetryHandle>,
 		&TaskManager,
 		&polkadot_service::NewFull<polkadot_service::Client>,
-		Arc<sc_transaction_pool::FullPool<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>>,
+		Arc<
+			sc_transaction_pool::FullPool<
+				Block,
+				TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+			>,
+		>,
 		Arc<NetworkService<Block, Hash>>,
 		SyncCryptoStorePtr,
 		bool,
 	) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
 {
 	if matches!(parachain_config.role, Role::Light) {
-		return Err("Light client not supported!".into());
+		return Err("Light client not supported!".into())
 	}
 
 	let parachain_config = prepare_node_config(parachain_config);
@@ -231,12 +255,11 @@ where
 	let (mut telemetry, telemetry_worker_handle) = params.other;
 
 	let relay_chain_full_node =
-		cumulus_client_service::build_polkadot_full_node(polkadot_config, telemetry_worker_handle).map_err(
-			|e| match e {
+		cumulus_client_service::build_polkadot_full_node(polkadot_config, telemetry_worker_handle)
+			.map_err(|e| match e {
 				polkadot_service::Error::Sub(x) => x,
 				s => format!("{}", s).into(),
-			},
-		)?;
+			})?;
 
 	let client = params.client.clone();
 	let backend = params.backend.clone();
@@ -253,16 +276,17 @@ where
 	let transaction_pool = params.transaction_pool.clone();
 	let mut task_manager = params.task_manager;
 	let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
-	let (network, system_rpc_tx, start_network) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &parachain_config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue: import_queue.clone(),
-		on_demand: None,
-		block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
-		warp_sync: None,
-	})?;
+	let (network, system_rpc_tx, start_network) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &parachain_config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue: import_queue.clone(),
+			on_demand: None,
+			block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
+			warp_sync: None,
+		})?;
 
 	let rpc_client = client.clone();
 	let rpc_extensions_builder = Box::new(move |_, _| Ok(rpc_ext_builder(rpc_client.clone())));
@@ -348,26 +372,33 @@ pub fn parachain_build_import_queue(
 > {
 	let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
 
-	cumulus_client_consensus_aura::import_queue::<sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _>(
-		cumulus_client_consensus_aura::ImportQueueParams {
-			block_import: client.clone(),
-			client: client.clone(),
-			create_inherent_data_providers: move |_, _| async move {
-				let time = sp_timestamp::InherentDataProvider::from_system_time();
+	cumulus_client_consensus_aura::import_queue::<
+		sp_consensus_aura::sr25519::AuthorityPair,
+		_,
+		_,
+		_,
+		_,
+		_,
+		_,
+	>(cumulus_client_consensus_aura::ImportQueueParams {
+		block_import: client.clone(),
+		client: client.clone(),
+		create_inherent_data_providers: move |_, _| async move {
+			let time = sp_timestamp::InherentDataProvider::from_system_time();
 
-				let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+			let slot =
+				sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
 					*time,
 					slot_duration.slot_duration(),
 				);
 
-				Ok((time, slot))
-			},
-			registry: config.prometheus_registry(),
-			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
-			spawner: &task_manager.spawn_essential_handle(),
-			telemetry,
+			Ok((time, slot))
 		},
-	)
+		registry: config.prometheus_registry(),
+		can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
+		spawner: &task_manager.spawn_essential_handle(),
+		telemetry,
+	})
 	.map_err(Into::into)
 }
 
@@ -438,7 +469,9 @@ pub async fn start_node(
 						);
 
 						let parachain_inherent = parachain_inherent.ok_or_else(|| {
-							Box::<dyn std::error::Error + Send + Sync>::from("Failed to create parachain inherent")
+							Box::<dyn std::error::Error + Send + Sync>::from(
+								"Failed to create parachain inherent",
+							)
 						})?;
 						Ok((time, slot, parachain_inherent))
 					}
diff --git a/bridges/bin/rialto-parachain/runtime/src/lib.rs b/bridges/bin/rialto-parachain/runtime/src/lib.rs
index 613c444a28f274513f54d2ad3fe26c9e335dcd5b..93118e3c9dd3b9435de39aceda4cc8a1b0183fde 100644
--- a/bridges/bin/rialto-parachain/runtime/src/lib.rs
+++ b/bridges/bin/rialto-parachain/runtime/src/lib.rs
@@ -63,10 +63,11 @@ use pallet_xcm::XcmPassthrough;
 use polkadot_parachain::primitives::Sibling;
 use xcm::latest::prelude::*;
 use xcm_builder::{
-	AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, CurrencyAdapter, EnsureXcmOrigin,
-	FixedWeightBounds, IsConcrete, LocationInverter, NativeAsset, ParentAsSuperuser, ParentIsDefault,
-	RelayChainAsNative, SiblingParachainAsNative, SiblingParachainConvertsVia, SignedAccountId32AsNative,
-	SignedToAccountId32, SovereignSignedViaLocation, TakeWeightCredit, UsingComponents,
+	AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, CurrencyAdapter,
+	EnsureXcmOrigin, FixedWeightBounds, IsConcrete, LocationInverter, NativeAsset,
+	ParentAsSuperuser, ParentIsDefault, RelayChainAsNative, SiblingParachainAsNative,
+	SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
+	SovereignSignedViaLocation, TakeWeightCredit, UsingComponents,
 };
 use xcm_executor::{Config, XcmExecutor};
 
@@ -110,8 +111,13 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signatu
 /// Extrinsic type that has already been checked.
 pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
 /// Executive: handles dispatch to the various modules.
-pub type Executive =
-	frame_executive::Executive<Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllPallets>;
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllPallets,
+>;
 
 impl_opaque_keys! {
 	pub struct SessionKeys {
@@ -159,14 +165,11 @@ pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
 /// The version information used to identify this runtime when compiled natively.
 #[cfg(feature = "std")]
 pub fn native_version() -> NativeVersion {
-	NativeVersion {
-		runtime_version: VERSION,
-		can_author_with: Default::default(),
-	}
+	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
 }
 
-/// We assume that approximately 10 percent of the block weight is consumed by `on_initalize` handlers.
-/// This is used to limit the maximal weight of a single extrinsic.
+/// We assume that approximately 10 percent of the block weight is consumed by `on_initialize`
+/// handlers. This is used to limit the maximal weight of a single extrinsic.
 const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10);
 /// We allow `Normal` extrinsics to fill up the block up to 75 percent, the rest can be used
 /// by  Operational  extrinsics.
@@ -664,12 +667,13 @@ impl cumulus_pallet_parachain_system::CheckInherents<Block> for CheckInherents {
 			.read_slot()
 			.expect("Could not read the relay chain slot from the proof");
 
-		let inherent_data = cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration(
-			relay_chain_slot,
-			sp_std::time::Duration::from_secs(6),
-		)
-		.create_inherent_data()
-		.expect("Could not create the timestamp inherent data");
+		let inherent_data =
+			cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration(
+				relay_chain_slot,
+				sp_std::time::Duration::from_secs(6),
+			)
+			.create_inherent_data()
+			.expect("Could not create the timestamp inherent data");
 
 		inherent_data.check_extrinsics(block)
 	}
diff --git a/bridges/bin/rialto/node/src/chain_spec.rs b/bridges/bin/rialto/node/src/chain_spec.rs
index 68a2928ed289065822e537f11ade320592811192..8c6b147f48204cfc86a7e5aec0ded82591a5c667 100644
--- a/bridges/bin/rialto/node/src/chain_spec.rs
+++ b/bridges/bin/rialto/node/src/chain_spec.rs
@@ -17,9 +17,9 @@
 use bp_rialto::derive_account_from_millau_id;
 use polkadot_primitives::v1::{AssignmentId, ValidatorId};
 use rialto_runtime::{
-	AccountId, BabeConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauMessagesConfig, BridgeRialtoPoaConfig,
-	ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig,
-	WASM_BINARY,
+	AccountId, BabeConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauMessagesConfig,
+	BridgeRialtoPoaConfig, ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig,
+	SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY,
 };
 use serde_json::json;
 use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
@@ -62,14 +62,7 @@ where
 /// Helper function to generate authority keys.
 pub fn get_authority_keys_from_seed(
 	s: &str,
-) -> (
-	AccountId,
-	BabeId,
-	GrandpaId,
-	ValidatorId,
-	AssignmentId,
-	AuthorityDiscoveryId,
-) {
+) -> (AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) {
 	(
 		get_account_id_from_seed::<sr25519::Public>(s),
 		get_from_seed::<BabeId>(s),
@@ -195,13 +188,7 @@ fn session_keys(
 	para_assignment: AssignmentId,
 	authority_discovery: AuthorityDiscoveryId,
 ) -> SessionKeys {
-	SessionKeys {
-		babe,
-		grandpa,
-		para_validator,
-		para_assignment,
-		authority_discovery,
-	}
+	SessionKeys { babe, grandpa, para_validator, para_assignment, authority_discovery }
 }
 
 fn testnet_genesis(
@@ -231,9 +218,7 @@ fn testnet_genesis(
 		},
 		bridge_rialto_poa: load_rialto_poa_bridge_config(),
 		bridge_kovan: load_kovan_bridge_config(),
-		grandpa: GrandpaConfig {
-			authorities: Vec::new(),
-		},
+		grandpa: GrandpaConfig { authorities: Vec::new() },
 		sudo: SudoConfig { key: root_key },
 		session: SessionConfig {
 			keys: initial_authorities
@@ -242,7 +227,13 @@ fn testnet_genesis(
 					(
 						x.0.clone(),
 						x.0.clone(),
-						session_keys(x.1.clone(), x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()),
+						session_keys(
+							x.1.clone(),
+							x.2.clone(),
+							x.3.clone(),
+							x.4.clone(),
+							x.5.clone(),
+						),
 					)
 				})
 				.collect::<Vec<_>>(),
@@ -320,9 +311,7 @@ fn load_kovan_bridge_config() -> BridgeKovanConfig {
 #[test]
 fn derived_dave_account_is_as_expected() {
 	let dave = get_account_id_from_seed::<sr25519::Public>("Dave");
-	let derived: AccountId = derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave));
-	assert_eq!(
-		derived.to_string(),
-		"5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string()
-	);
+	let derived: AccountId =
+		derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave));
+	assert_eq!(derived.to_string(), "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string());
 }
diff --git a/bridges/bin/rialto/node/src/command.rs b/bridges/bin/rialto/node/src/command.rs
index 308cb10344410221753a068c34d55b1b5ebf3b50..6f841a9d67f1ded579e053cdf592b684bc3855db 100644
--- a/bridges/bin/rialto/node/src/command.rs
+++ b/bridges/bin/rialto/node/src/command.rs
@@ -14,8 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{Cli, Subcommand};
-use crate::service::new_partial;
+use crate::{
+	cli::{Cli, Subcommand},
+	service::new_partial,
+};
 use rialto_runtime::{Block, RuntimeApi};
 use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
 use sc_service::PartialComponents;
@@ -73,7 +75,7 @@ pub fn run() -> sc_cli::Result<()> {
 	));
 
 	match &cli.subcommand {
-		Some(Subcommand::Benchmark(cmd)) => {
+		Some(Subcommand::Benchmark(cmd)) =>
 			if cfg!(feature = "runtime-benchmarks") {
 				let runner = cli.create_runner(cmd)?;
 
@@ -84,8 +86,7 @@ pub fn run() -> sc_cli::Result<()> {
 				You can enable it with `--features runtime-benchmarks`."
 				);
 				Ok(())
-			}
-		}
+			},
 		Some(Subcommand::Key(cmd)) => cmd.run(&cli),
 		Some(Subcommand::Sign(cmd)) => cmd.run(),
 		Some(Subcommand::Verify(cmd)) => cmd.run(),
@@ -93,69 +94,57 @@ pub fn run() -> sc_cli::Result<()> {
 		Some(Subcommand::BuildSpec(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
-		}
+		},
 		Some(Subcommand::CheckBlock(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|mut config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&mut config).map_err(service_error)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|mut config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&mut config).map_err(service_error)?;
+				let PartialComponents { client, task_manager, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|mut config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&mut config).map_err(service_error)?;
+				let PartialComponents { client, task_manager, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|mut config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&mut config).map_err(service_error)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::PurgeChain(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.database))
-		}
+		},
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|mut config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					backend,
-					..
-				} = new_partial(&mut config).map_err(service_error)?;
+				let PartialComponents { client, task_manager, backend, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, backend), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::Inspect(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.sync_run(|config| cmd.run::<Block, RuntimeApi, crate::service::ExecutorDispatch>(config))
-		}
+			runner.sync_run(|config| {
+				cmd.run::<Block, RuntimeApi, crate::service::ExecutorDispatch>(config)
+			})
+		},
 		Some(Subcommand::PvfPrepareWorker(cmd)) => {
 			let mut builder = sc_cli::LoggerBuilder::new("");
 			builder.with_colors(false);
@@ -163,7 +152,7 @@ pub fn run() -> sc_cli::Result<()> {
 
 			polkadot_node_core_pvf::prepare_worker_entrypoint(&cmd.socket_path);
 			Ok(())
-		}
+		},
 		Some(crate::cli::Subcommand::PvfExecuteWorker(cmd)) => {
 			let mut builder = sc_cli::LoggerBuilder::new("");
 			builder.with_colors(false);
@@ -171,7 +160,7 @@ pub fn run() -> sc_cli::Result<()> {
 
 			polkadot_node_core_pvf::execute_worker_entrypoint(&cmd.socket_path);
 			Ok(())
-		}
+		},
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 
@@ -192,7 +181,7 @@ pub fn run() -> sc_cli::Result<()> {
 						.map_err(service_error),
 				}
 			})
-		}
+		},
 	}
 }
 
diff --git a/bridges/bin/rialto/node/src/overseer.rs b/bridges/bin/rialto/node/src/overseer.rs
index 3742bcdc8927d82783bc08d509e02c506be9aa32..1c381c1df8fdde3fc53eb6bcdbacbf28dd2e0341 100644
--- a/bridges/bin/rialto/node/src/overseer.rs
+++ b/bridges/bin/rialto/node/src/overseer.rs
@@ -87,7 +87,8 @@ where
 	pub pov_req_receiver: IncomingRequestReceiver<request_v1::PoVFetchingRequest>,
 	pub chunk_req_receiver: IncomingRequestReceiver<request_v1::ChunkFetchingRequest>,
 	pub collation_req_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
-	pub available_data_req_receiver: IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>,
+	pub available_data_req_receiver:
+		IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>,
 	pub statement_req_receiver: IncomingRequestReceiver<request_v1::StatementFetchingRequest>,
 	pub dispute_req_receiver: IncomingRequestReceiver<request_v1::DisputeRequest>,
 	/// Prometheus registry, commonly used for production systems, less so for test.
@@ -143,7 +144,10 @@ pub fn create_default_subsystems<Spawner, RuntimeClient>(
 		ProvisionerSubsystem<Spawner>,
 		RuntimeApiSubsystem<RuntimeClient>,
 		AvailabilityStoreSubsystem,
-		NetworkBridgeSubsystem<Arc<sc_network::NetworkService<Block, Hash>>, AuthorityDiscoveryService>,
+		NetworkBridgeSubsystem<
+			Arc<sc_network::NetworkService<Block, Hash>>,
+			AuthorityDiscoveryService,
+		>,
 		ChainApiSubsystem<RuntimeClient>,
 		CollationGenerationSubsystem,
 		CollatorProtocolSubsystem,
@@ -167,10 +171,7 @@ where
 	let all_subsystems = AllSubsystems {
 		availability_distribution: AvailabilityDistributionSubsystem::new(
 			keystore.clone(),
-			IncomingRequestReceivers {
-				pov_req_receiver,
-				chunk_req_receiver,
-			},
+			IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver },
 			Metrics::register(registry)?,
 		),
 		availability_recovery: AvailabilityRecoverySubsystem::with_chunks_only(
@@ -212,7 +213,11 @@ where
 			Metrics::register(registry)?,
 		),
 		provisioner: ProvisionerSubsystem::new(spawner.clone(), (), Metrics::register(registry)?),
-		runtime_api: RuntimeApiSubsystem::new(runtime_client, Metrics::register(registry)?, spawner),
+		runtime_api: RuntimeApiSubsystem::new(
+			runtime_client,
+			Metrics::register(registry)?,
+			spawner,
+		),
 		statement_distribution: StatementDistributionSubsystem::new(
 			keystore.clone(),
 			statement_req_receiver,
@@ -287,6 +292,7 @@ impl OverseerGen for RealOverseerGen {
 
 		let all_subsystems = create_default_subsystems::<Spawner, RuntimeClient>(args)?;
 
-		Overseer::new(leaves, all_subsystems, registry, runtime_client, spawner).map_err(|e| e.into())
+		Overseer::new(leaves, all_subsystems, registry, runtime_client, spawner)
+			.map_err(|e| e.into())
 	}
 }
diff --git a/bridges/bin/rialto/node/src/parachains_db.rs b/bridges/bin/rialto/node/src/parachains_db.rs
index 976191fc807884a3a8e8e7452cb2b3afc9d6d1dc..bf2052043c98797e5f2e594b75ada58397f4d109 100644
--- a/bridges/bin/rialto/node/src/parachains_db.rs
+++ b/bridges/bin/rialto/node/src/parachains_db.rs
@@ -17,7 +17,8 @@
 //! This is almost 1:1 copy of `node/service/parachains_db/mod.rs` file from Polkadot repository.
 //! The only exception is that we don't support db upgrades => no `upgrade.rs` module.
 
-use {kvdb::KeyValueDB, std::io, std::path::PathBuf, std::sync::Arc};
+use kvdb::KeyValueDB;
+use std::{io, path::PathBuf, sync::Arc};
 
 mod columns {
 	pub const NUM_COLUMNS: u32 = 5;
@@ -66,11 +67,7 @@ pub struct CacheSizes {
 
 impl Default for CacheSizes {
 	fn default() -> Self {
-		CacheSizes {
-			availability_data: 25,
-			availability_meta: 1,
-			approval_data: 5,
-		}
+		CacheSizes { availability_data: 25, availability_meta: 1, approval_data: 5 }
 	}
 }
 
diff --git a/bridges/bin/rialto/node/src/service.rs b/bridges/bin/rialto/node/src/service.rs
index 19743933c1eaff6fd8d1e298958518773ec7b7ca..8a2a38a59c60b598cb60cf164027c1054b1d0150 100644
--- a/bridges/bin/rialto/node/src/service.rs
+++ b/bridges/bin/rialto/node/src/service.rs
@@ -19,7 +19,8 @@
 //! The code is mostly copy of `service/src/lib.rs` file from Polkadot repository
 //! without optional functions.
 
-// this warning comes from Error enum (sc_cli::Error in particular) && it isn't easy to use box there
+// this warning comes from Error enum (sc_cli::Error in particular) && it isn't easy to use box
+// there
 #![allow(clippy::large_enum_variant)]
 // this warning comes from `sc_service::PartialComponents` type
 #![allow(clippy::type_complexity)]
@@ -46,14 +47,12 @@ use sp_runtime::traits::{BlakeTwo256, Block as BlockT};
 use std::{sync::Arc, time::Duration};
 use substrate_prometheus_endpoint::Registry;
 
-pub use {
-	polkadot_overseer::{Handle, Overseer, OverseerHandle},
-	polkadot_primitives::v1::ParachainHost,
-	sc_client_api::AuxStore,
-	sp_authority_discovery::AuthorityDiscoveryApi,
-	sp_blockchain::HeaderBackend,
-	sp_consensus_babe::BabeApi,
-};
+pub use polkadot_overseer::{Handle, Overseer, OverseerHandle};
+pub use polkadot_primitives::v1::ParachainHost;
+pub use sc_client_api::AuxStore;
+pub use sp_authority_discovery::AuthorityDiscoveryApi;
+pub use sp_blockchain::HeaderBackend;
+pub use sp_consensus_babe::BabeApi;
 
 pub type Executor = NativeElseWasmExecutor<ExecutorDispatch>;
 
@@ -108,9 +107,11 @@ pub enum Error {
 type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
 type FullBackend = sc_service::TFullBackend<Block>;
 type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
-type FullGrandpaBlockImport = sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>;
+type FullGrandpaBlockImport =
+	sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>;
 type FullTransactionPool = sc_transaction_pool::FullPool<Block, FullClient>;
-type FullBabeBlockImport = sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>;
+type FullBabeBlockImport =
+	sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>;
 type FullBabeLink = sc_consensus_babe::BabeLink<Block>;
 type FullGrandpaLink = sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>;
 
@@ -125,8 +126,11 @@ pub trait RequiredApiCollection:
 	+ sp_finality_grandpa::GrandpaApi<Block>
 	+ polkadot_primitives::v1::ParachainHost<Block>
 	+ sp_block_builder::BlockBuilder<Block>
-	+ frame_system_rpc_runtime_api::AccountNonceApi<Block, bp_rialto::AccountId, rialto_runtime::Index>
-	+ pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, bp_rialto::Balance>
+	+ frame_system_rpc_runtime_api::AccountNonceApi<
+		Block,
+		bp_rialto::AccountId,
+		rialto_runtime::Index,
+	> + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, bp_rialto::Balance>
 	+ sp_api::Metadata<Block>
 	+ sp_offchain::OffchainWorkerApi<Block>
 	+ sp_session::SessionKeys<Block>
@@ -144,8 +148,11 @@ where
 		+ sp_finality_grandpa::GrandpaApi<Block>
 		+ polkadot_primitives::v1::ParachainHost<Block>
 		+ sp_block_builder::BlockBuilder<Block>
-		+ frame_system_rpc_runtime_api::AccountNonceApi<Block, bp_rialto::AccountId, rialto_runtime::Index>
-		+ pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, bp_rialto::Balance>
+		+ frame_system_rpc_runtime_api::AccountNonceApi<
+			Block,
+			bp_rialto::AccountId,
+			rialto_runtime::Index,
+		> + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, bp_rialto::Balance>
 		+ sp_api::Metadata<Block>
 		+ sp_offchain::OffchainWorkerApi<Block>
 		+ sp_session::SessionKeys<Block>
@@ -210,11 +217,12 @@ where
 		config.max_runtime_instances,
 	);
 
-	let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
-		config,
-		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-		executor,
-	)?;
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
 	let client = Arc::new(client);
 
 	let telemetry = telemetry.map(|(worker, telemetry)| {
@@ -232,13 +240,14 @@ where
 		client.clone(),
 	);
 
-	let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import_with_authority_set_hard_forks(
-		client.clone(),
-		&(client.clone() as Arc<_>),
-		select_chain.clone(),
-		Vec::new(),
-		telemetry.as_ref().map(|x| x.handle()),
-	)?;
+	let (grandpa_block_import, grandpa_link) =
+		sc_finality_grandpa::block_import_with_authority_set_hard_forks(
+			client.clone(),
+			&(client.clone() as Arc<_>),
+			select_chain.clone(),
+			Vec::new(),
+			telemetry.as_ref().map(|x| x.handle()),
+		)?;
 	let justification_import = grandpa_block_import.clone();
 
 	let babe_config = sc_consensus_babe::Config::get_or_compute(&*client)?;
@@ -255,10 +264,11 @@ where
 		move |_, ()| async move {
 			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
-			);
+			let slot =
+				sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
+					*timestamp,
+					slot_duration,
+				);
 
 			Ok((timestamp, slot))
 		},
@@ -295,8 +305,10 @@ where
 
 			let shared_voter_state = shared_voter_state.clone();
 
-			let finality_proof_provider =
-				GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone()));
+			let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
+				backend,
+				Some(shared_authority_set.clone()),
+			);
 
 			let mut io = jsonrpc_core::IoHandler::default();
 			io.extend_with(SystemApi::to_delegate(FullSystem::new(
@@ -325,13 +337,7 @@ where
 		select_chain,
 		import_queue,
 		transaction_pool,
-		other: (
-			rpc_extensions_builder,
-			import_setup,
-			rpc_setup,
-			slot_duration,
-			telemetry,
-		),
+		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, telemetry),
 	})
 }
 
@@ -344,7 +350,7 @@ pub struct NewFull<C> {
 	pub backend: Arc<FullBackend>,
 }
 
-/// The maximum number of active leaves we forward to the [`Overseer`] on startup.
+/// The maximum number of active leaves we forward to the [`Overseer`] on start up.
 const MAX_ACTIVE_LEAVES: usize = 4;
 
 /// Returns the active leaves the overseer should start with.
@@ -370,16 +376,12 @@ where
 
 			// Only consider leaves that are in maximum an uncle of the best block.
 			if number < best_block.number().saturating_sub(1) || hash == best_block.hash() {
-				return None;
+				return None
 			}
 
 			let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;
 
-			Some(BlockInfo {
-				hash,
-				parent_hash,
-				number,
-			})
+			Some(BlockInfo { hash, parent_hash, number })
 		})
 		.collect::<Vec<_>>();
 
@@ -411,7 +413,8 @@ where
 
 	let role = config.role.clone();
 	let force_authoring = config.force_authoring;
-	let backoff_authoring_blocks = Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default());
+	let backoff_authoring_blocks =
+		Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default());
 
 	let disable_grandpa = config.disable_grandpa;
 	let name = config.network.node_name.clone();
@@ -435,18 +438,11 @@ where
 	// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
 	// anything in terms of behaviour, but makes the logs more consistent with the other
 	// Substrate nodes.
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
+	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
 
 	{
 		use polkadot_network_bridge::{peer_sets_info, IsAuthority};
-		let is_authority = if role.is_authority() {
-			IsAuthority::Yes
-		} else {
-			IsAuthority::No
-		};
+		let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
 		config.network.extra_sets.extend(peer_sets_info(is_authority));
 	}
 
@@ -468,20 +464,25 @@ where
 		import_setup.1.shared_authority_set().clone(),
 	));
 
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: None,
-		block_announce_validator_builder: None,
-		warp_sync: Some(warp_sync),
-	})?;
+	let (network, system_rpc_tx, network_starter) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue,
+			on_demand: None,
+			block_announce_validator_builder: None,
+			warp_sync: Some(warp_sync),
+		})?;
 
 	if config.offchain_worker.enabled {
-		let _ =
-			sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
+		let _ = sc_service::build_offchain_workers(
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
+		);
 	}
 
 	let parachains_db = crate::parachains_db::open_creating(
@@ -551,12 +552,13 @@ where
 			// don't publish our addresses when we're only a collator
 			sc_authority_discovery::Role::Discover
 		};
-		let dht_event_stream = network.event_stream("authority-discovery").filter_map(|e| async move {
-			match e {
-				Event::Dht(e) => Some(e),
-				_ => None,
-			}
-		});
+		let dht_event_stream =
+			network.event_stream("authority-discovery").filter_map(|e| async move {
+				match e {
+					Event::Dht(e) => Some(e),
+					_ => None,
+				}
+			});
 		let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
 			sc_authority_discovery::WorkerConfig {
 				publish_non_global_ips: auth_disc_publish_non_global_ips,
@@ -569,22 +571,22 @@ where
 			prometheus_registry.clone(),
 		);
 
-		task_manager
-			.spawn_handle()
-			.spawn("authority-discovery-worker", worker.run());
+		task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run());
 		Some(service)
 	} else {
 		None
 	};
 
-	// we'd say let overseer_handler = authority_discovery_service.map(|authority_discovery_service|, ...),
-	// but in that case we couldn't use ? to propagate errors
+	// we'd say let overseer_handler =
+	// authority_discovery_service.map(|authority_discovery_service|, ...), but in that case we
+	// couldn't use ? to propagate errors
 	let local_keystore = keystore_container.local_keystore();
-	let maybe_params = local_keystore.and_then(move |k| authority_discovery_service.map(|a| (a, k)));
+	let maybe_params =
+		local_keystore.and_then(move |k| authority_discovery_service.map(|a| (a, k)));
 
 	let overseer_handle = if let Some((authority_discovery_service, keystore)) = maybe_params {
-		let (overseer, overseer_handle) =
-			overseer_gen.generate::<sc_service::SpawnTaskHandle, FullClient>(OverseerGenArgs {
+		let (overseer, overseer_handle) = overseer_gen
+			.generate::<sc_service::SpawnTaskHandle, FullClient>(OverseerGenArgs {
 				leaves: active_leaves,
 				keystore,
 				runtime_client: overseer_client.clone(),
@@ -635,7 +637,8 @@ where
 	};
 
 	if role.is_authority() {
-		let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
+		let can_author_with =
+			sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
 
 		let proposer = sc_basic_authorship::ProposerFactory::new(
 			task_manager.spawn_handle(),
@@ -646,10 +649,8 @@ where
 		);
 
 		let client_clone = client.clone();
-		let overseer_handle = overseer_handle
-			.as_ref()
-			.ok_or(Error::AuthoritiesRequireRealOverseer)?
-			.clone();
+		let overseer_handle =
+			overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone();
 		let slot_duration = babe_link.config().slot_duration();
 		let babe_config = sc_consensus_babe::BabeParams {
 			keystore: keystore_container.sync_keystore(),
@@ -671,7 +672,10 @@ where
 					.await
 					.map_err(Box::new)?;
 
-					let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider(&*client_clone, parent)?;
+					let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider(
+						&*client_clone,
+						parent,
+					)?;
 
 					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
@@ -698,11 +702,8 @@ where
 
 	// if the node isn't actively participating in consensus then it doesn't
 	// need a keystore, regardless of which protocol we use below.
-	let keystore_opt = if role.is_authority() {
-		Some(keystore_container.sync_keystore())
-	} else {
-		None
-	};
+	let keystore_opt =
+		if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };
 
 	let config = sc_finality_grandpa::Config {
 		// FIXME substrate#1578 make this available through chainspec
@@ -740,23 +741,20 @@ where
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
 		};
 
-		task_manager
-			.spawn_essential_handle()
-			.spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?);
+		task_manager.spawn_essential_handle().spawn_blocking(
+			"grandpa-voter",
+			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
+		);
 	}
 
 	network_starter.start_network();
 
-	Ok(NewFull {
-		task_manager,
-		client,
-		overseer_handle,
-		network,
-		rpc_handlers,
-		backend,
-	})
+	Ok(NewFull { task_manager, client, overseer_handle, network, rpc_handlers, backend })
 }
 
-pub fn build_full(config: Configuration, overseer_gen: impl OverseerGen) -> Result<NewFull<Arc<FullClient>>, Error> {
+pub fn build_full(
+	config: Configuration,
+	overseer_gen: impl OverseerGen,
+) -> Result<NewFull<Arc<FullClient>>, Error> {
 	new_full(config, None, overseer_gen)
 }
diff --git a/bridges/bin/rialto/runtime/src/benches.rs b/bridges/bin/rialto/runtime/src/benches.rs
index 86d6b8361c635da70ea231cdc51f00c7969a2879..ce3f84069795a4e240a27157949ec836ed2c330d 100644
--- a/bridges/bin/rialto/runtime/src/benches.rs
+++ b/bridges/bin/rialto/runtime/src/benches.rs
@@ -22,7 +22,8 @@ use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource};
 use sp_std::vec;
 
 pub use crate::kovan::{
-	genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, PruningStrategy,
+	genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval,
+	PruningStrategy,
 };
 
 frame_support::parameter_types! {
diff --git a/bridges/bin/rialto/runtime/src/exchange.rs b/bridges/bin/rialto/runtime/src/exchange.rs
index cdb9d9db62e091e45af9ccdb19b77cbab2421971..9f34707848fea9c7d2706d783bfe35868a987c6a 100644
--- a/bridges/bin/rialto/runtime/src/exchange.rs
+++ b/bridges/bin/rialto/runtime/src/exchange.rs
@@ -28,7 +28,8 @@
 //! 5) receive tokens by providing proof-of-inclusion of PoA transaction.
 
 use bp_currency_exchange::{
-	Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, Result as ExchangeResult,
+	Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction,
+	Result as ExchangeResult,
 };
 use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt};
 use codec::{Decode, Encode};
@@ -87,7 +88,7 @@ impl MaybeLockFundsTransaction for EthTransaction {
 				tx.unsigned.to,
 			);
 
-			return Err(ExchangeError::InvalidTransaction);
+			return Err(ExchangeError::InvalidTransaction)
 		}
 
 		let mut recipient_raw = sp_core::H256::default();
@@ -100,8 +101,8 @@ impl MaybeLockFundsTransaction for EthTransaction {
 					len,
 				);
 
-				return Err(ExchangeError::InvalidRecipient);
-			}
+				return Err(ExchangeError::InvalidRecipient)
+			},
 		}
 		let amount = tx.unsigned.value.low_u128();
 
@@ -112,7 +113,7 @@ impl MaybeLockFundsTransaction for EthTransaction {
 				tx.unsigned.value,
 			);
 
-			return Err(ExchangeError::InvalidAmount);
+			return Err(ExchangeError::InvalidAmount)
 		}
 
 		Ok(LockFundsTransaction {
@@ -213,10 +214,7 @@ mod tests {
 
 	#[test]
 	fn invalid_transaction_rejected() {
-		assert_eq!(
-			EthTransaction::parse(&Vec::new()),
-			Err(ExchangeError::InvalidTransaction),
-		);
+		assert_eq!(EthTransaction::parse(&Vec::new()), Err(ExchangeError::InvalidTransaction),);
 	}
 
 	#[test]
diff --git a/bridges/bin/rialto/runtime/src/kovan.rs b/bridges/bin/rialto/runtime/src/kovan.rs
index 15c73fc6ea3f0dd74258b4b401649bd82db77654..95b4f8c42f03ba47045375f7c3e91d6f607ab498 100644
--- a/bridges/bin/rialto/runtime/src/kovan.rs
+++ b/bridges/bin/rialto/runtime/src/kovan.rs
@@ -21,8 +21,8 @@ use bp_header_chain::InclusionProofVerifier;
 use frame_support::RuntimeDebug;
 use hex_literal::hex;
 use pallet_bridge_eth_poa::{
-	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, ValidatorsConfiguration,
-	ValidatorsSource,
+	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy,
+	ValidatorsConfiguration, ValidatorsSource,
 };
 use sp_std::prelude::*;
 
@@ -102,11 +102,14 @@ pub fn genesis_header() -> AuraHeader {
 		timestamp: 0,
 		number: 0,
 		author: Default::default(),
-		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
-		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(),
+		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
+		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
+			.into(),
 		extra_data: vec![],
 		state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(),
-		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
+		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
 		log_bloom: Default::default(),
 		gas_used: Default::default(),
 		gas_limit: 6000000.into(),
@@ -114,8 +117,9 @@ pub fn genesis_header() -> AuraHeader {
 		seal: vec![
 			vec![128],
 			vec![
-				184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 			],
 		],
 	}
@@ -153,12 +157,17 @@ impl InclusionProofVerifier for KovanBlockchain {
 	type Transaction = RawTransaction;
 	type TransactionInclusionProof = EthereumTransactionInclusionProof;
 
-	fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<Self::Transaction> {
-		let is_transaction_finalized =
-			crate::BridgeKovan::verify_transaction_finalized(proof.block, proof.index, &proof.proof);
+	fn verify_transaction_inclusion_proof(
+		proof: &Self::TransactionInclusionProof,
+	) -> Option<Self::Transaction> {
+		let is_transaction_finalized = crate::BridgeKovan::verify_transaction_finalized(
+			proof.block,
+			proof.index,
+			&proof.proof,
+		);
 
 		if !is_transaction_finalized {
-			return None;
+			return None
 		}
 
 		proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone())
diff --git a/bridges/bin/rialto/runtime/src/lib.rs b/bridges/bin/rialto/runtime/src/lib.rs
index b440000ae6b7b45675c11234b8872572c165dc7e..84908fd52bf0a0aa59664cccbbae0aa28fd5ee4e 100644
--- a/bridges/bin/rialto/runtime/src/lib.rs
+++ b/bridges/bin/rialto/runtime/src/lib.rs
@@ -41,15 +41,19 @@ pub mod rialto_poa;
 
 use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge};
 
-use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge};
-use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
+use bridge_runtime_common::messages::{
+	source::estimate_message_dispatch_and_delivery_fee, MessageBridge,
+};
+use pallet_grandpa::{
+	fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
+};
 use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo};
 use sp_api::impl_runtime_apis;
 use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
 use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
-use sp_runtime::traits::{AccountIdLookup, Block as BlockT, NumberFor, OpaqueKeys};
 use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys,
+	traits::{AccountIdLookup, Block as BlockT, NumberFor, OpaqueKeys},
 	transaction_validity::{TransactionSource, TransactionValidity},
 	ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill,
 };
@@ -149,10 +153,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 /// The version information used to identify this runtime when compiled natively.
 #[cfg(feature = "std")]
 pub fn native_version() -> NativeVersion {
-	NativeVersion {
-		runtime_version: VERSION,
-		can_author_with: Default::default(),
-	}
+	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
 }
 
 parameter_types! {
@@ -238,10 +239,14 @@ impl pallet_babe::Config for Runtime {
 
 	// equivocation related configuration - we don't expect any equivocations in our testnets
 	type KeyOwnerProofSystem = ();
-	type KeyOwnerProof =
-		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::Proof;
-	type KeyOwnerIdentification =
-		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, pallet_babe::AuthorityId)>>::IdentificationTuple;
+	type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		pallet_babe::AuthorityId,
+	)>>::Proof;
+	type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		pallet_babe::AuthorityId,
+	)>>::IdentificationTuple;
 	type HandleEquivocation = ();
 
 	type DisabledValidators = ();
@@ -308,13 +313,18 @@ impl bp_currency_exchange::DepositInto for DepositInto {
 	type Recipient = AccountId;
 	type Amount = Balance;
 
-	fn deposit_into(recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> {
-		// let balances module make all checks for us (it won't allow depositing lower than existential
-		// deposit, balance overflow, ...)
-		let deposited = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(&recipient, amount);
+	fn deposit_into(
+		recipient: Self::Recipient,
+		amount: Self::Amount,
+	) -> bp_currency_exchange::Result<()> {
+		// let balances module make all checks for us (it won't allow depositing lower than
+		// existential deposit, balance overflow, ...)
+		let deposited = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
+			&recipient, amount,
+		);
 
-		// I'm dropping deposited here explicitly to illustrate the fact that it'll update `TotalIssuance`
-		// on drop
+		// I'm dropping deposited here explicitly to illustrate the fact that it'll update
+		// `TotalIssuance` on drop
 		let deposited_amount = deposited.peek();
 		drop(deposited);
 
@@ -332,7 +342,7 @@ impl bp_currency_exchange::DepositInto for DepositInto {
 				);
 
 				Ok(())
-			}
+			},
 			_ if deposited_amount == 0 => {
 				log::error!(
 					target: "runtime",
@@ -342,7 +352,7 @@ impl bp_currency_exchange::DepositInto for DepositInto {
 				);
 
 				Err(bp_currency_exchange::Error::DepositFailed)
-			}
+			},
 			_ => {
 				log::error!(
 					target: "runtime",
@@ -354,7 +364,7 @@ impl bp_currency_exchange::DepositInto for DepositInto {
 
 				// we can't return DepositFailed error here, because storage changes were made
 				Err(bp_currency_exchange::Error::DepositPartiallyFailed)
-			}
+			},
 		}
 	}
 }
@@ -363,9 +373,12 @@ impl pallet_grandpa::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
 	type KeyOwnerProofSystem = ();
-	type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
-	type KeyOwnerIdentification =
-		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::IdentificationTuple;
+	type KeyOwnerProof =
+		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
+	type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		GrandpaId,
+	)>>::IdentificationTuple;
 	type HandleEquivocation = ();
 	// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
 	type WeightInfo = ();
@@ -529,12 +542,13 @@ impl pallet_bridge_messages::Config<WithMillauMessagesInstance> for Runtime {
 
 	type TargetHeaderChain = crate::millau_messages::Millau;
 	type LaneMessageVerifier = crate::millau_messages::ToMillauMessageVerifier;
-	type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
-		Runtime,
-		pallet_balances::Pallet<Runtime>,
-		GetDeliveryConfirmationTransactionFee,
-		RootAccountForPayments,
-	>;
+	type MessageDeliveryAndDispatchPayment =
+		pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
+			Runtime,
+			pallet_balances::Pallet<Runtime>,
+			GetDeliveryConfirmationTransactionFee,
+			RootAccountForPayments,
+		>;
 	type OnMessageAccepted = ();
 	type OnDeliveryConfirmed = ();
 
@@ -625,8 +639,13 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signatu
 /// Extrinsic type that has already been checked.
 pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
 /// Executive: handles dispatch to the various modules.
-pub type Executive =
-	frame_executive::Executive<Runtime, Block, frame_system::ChainContext<Runtime>, Runtime, AllPallets>;
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllPallets,
+>;
 
 impl_runtime_apis! {
 	impl sp_api::Core<Block> for Runtime {
@@ -1277,8 +1296,8 @@ impl_runtime_apis! {
 /// Millau account ownership digest from Rialto.
 ///
 /// The byte vector returned by this function should be signed with a Millau account private key.
-/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private key
-/// is also under his control.
+/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private
+/// key is also under his control.
 pub fn rialto_to_millau_account_ownership_digest<Call, AccountId, SpecVersion>(
 	millau_call: &Call,
 	rialto_account_id: AccountId,
@@ -1305,7 +1324,8 @@ mod tests {
 	use bridge_runtime_common::messages;
 
 	fn run_deposit_into_test(test: impl Fn(AccountId) -> Balance) {
-		let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::<Runtime>().unwrap().into();
+		let mut ext: sp_io::TestExternalities =
+			SystemConfig::default().build_storage::<Runtime>().unwrap().into();
 		ext.execute_with(|| {
 			// initially issuance is zero
 			assert_eq!(
@@ -1317,7 +1337,10 @@ mod tests {
 			let account: AccountId = [1u8; 32].into();
 			let initial_amount = ExistentialDeposit::get();
 			let deposited =
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(&account, initial_amount);
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
+					&account,
+					initial_amount,
+				);
 			drop(deposited);
 			assert_eq!(
 				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::total_issuance(),
@@ -1358,15 +1381,18 @@ mod tests {
 			bp_rialto::max_extrinsic_size(),
 			bp_rialto::max_extrinsic_weight(),
 			max_incoming_message_proof_size,
-			messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight()),
+			messages::target::maximal_incoming_message_dispatch_weight(
+				bp_rialto::max_extrinsic_weight(),
+			),
 		);
 
-		let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
-			bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
-			bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
-			bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
-		)
-		.unwrap_or(u32::MAX);
+		let max_incoming_inbound_lane_data_proof_size =
+			bp_messages::InboundLaneData::<()>::encoded_size_hint(
+				bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
+				bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
+				bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
+			)
+			.unwrap_or(u32::MAX);
 		pallet_bridge_messages::ensure_able_to_receive_confirmation::<Weights>(
 			bp_rialto::max_extrinsic_size(),
 			bp_rialto::max_extrinsic_weight(),
@@ -1381,7 +1407,9 @@ mod tests {
 	fn deposit_into_existing_account_works() {
 		run_deposit_into_test(|existing_account| {
 			let initial_amount =
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&existing_account);
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&existing_account,
+				);
 			let additional_amount = 10_000;
 			<Runtime as pallet_bridge_currency_exchange::Config<KovanCurrencyExchange>>::DepositInto::deposit_into(
 				existing_account.clone(),
@@ -1389,7 +1417,9 @@ mod tests {
 			)
 			.unwrap();
 			assert_eq!(
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&existing_account),
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&existing_account
+				),
 				initial_amount + additional_amount,
 			);
 			additional_amount
@@ -1408,7 +1438,9 @@ mod tests {
 			)
 			.unwrap();
 			assert_eq!(
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&new_account),
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&new_account
+				),
 				initial_amount + additional_amount,
 			);
 			additional_amount
diff --git a/bridges/bin/rialto/runtime/src/millau_messages.rs b/bridges/bin/rialto/runtime/src/millau_messages.rs
index bb24a95f9ccba1994344abf94eb3aac2c107991c..0342f5b48ab392f0bc8f04d85c67a1db373ce873 100644
--- a/bridges/bin/rialto/runtime/src/millau_messages.rs
+++ b/bridges/bin/rialto/runtime/src/millau_messages.rs
@@ -35,7 +35,8 @@ use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128};
 use sp_std::{convert::TryFrom, ops::RangeInclusive};
 
 /// Initial value of `MillauToRialtoConversionRate` parameter.
-pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
+pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 =
+	FixedU128::from_inner(FixedU128::DIV);
 /// Initial value of `MillauFeeMultiplier` parameter.
 pub const INITIAL_MILLAU_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
 
@@ -47,13 +48,16 @@ parameter_types! {
 }
 
 /// Message payload for Rialto -> Millau messages.
-pub type ToMillauMessagePayload = messages::source::FromThisChainMessagePayload<WithMillauMessageBridge>;
+pub type ToMillauMessagePayload =
+	messages::source::FromThisChainMessagePayload<WithMillauMessageBridge>;
 
 /// Message verifier for Rialto -> Millau messages.
-pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifier<WithMillauMessageBridge>;
+pub type ToMillauMessageVerifier =
+	messages::source::FromThisChainMessageVerifier<WithMillauMessageBridge>;
 
 /// Message payload for Millau -> Rialto messages.
-pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload<WithMillauMessageBridge>;
+pub type FromMillauMessagePayload =
+	messages::target::FromBridgedChainMessagePayload<WithMillauMessageBridge>;
 
 /// Encoded Rialto Call as it comes from Millau.
 pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<crate::Call>;
@@ -70,7 +74,8 @@ pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDi
 pub type FromMillauMessagesProof = messages::target::FromBridgedChainMessagesProof<bp_millau::Hash>;
 
 /// Messages delivery proof for Rialto -> Millau messages.
-pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof<bp_millau::Hash>;
+pub type ToMillauMessagesDeliveryProof =
+	messages::source::FromBridgedChainMessagesDeliveryProof<bp_millau::Hash>;
 
 /// Millau <-> Rialto message bridge.
 #[derive(RuntimeDebug, Clone, Copy)]
@@ -86,8 +91,10 @@ impl MessageBridge for WithMillauMessageBridge {
 	type BridgedChain = Millau;
 
 	fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance {
-		bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance))
-			.unwrap_or(bp_rialto::Balance::MAX)
+		bp_rialto::Balance::try_from(
+			MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance),
+		)
+		.unwrap_or(bp_rialto::Balance::MAX)
 	}
 }
 
@@ -167,12 +174,15 @@ impl messages::BridgedChainWithMessages for Millau {
 
 	fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive<Weight> {
 		// we don't want to relay too large messages + keep reserve for future upgrades
-		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight());
+		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(
+			bp_millau::max_extrinsic_weight(),
+		);
 
-		// we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment` function
+		// we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment`
+		// function
 		//
-		// this bridge may be used to deliver all kind of messages, so we're not making any assumptions about
-		// minimal dispatch weight here
+		// this bridge may be used to deliver all kinds of messages, so we're not making any
+		// assumptions about minimal dispatch weight here
 
 		0..=upper_limit
 	}
@@ -232,9 +242,11 @@ impl TargetHeaderChain<ToMillauMessagePayload, bp_millau::AccountId> for Millau
 	fn verify_messages_delivery_proof(
 		proof: Self::MessagesDeliveryProof,
 	) -> Result<(LaneId, InboundLaneData<bp_rialto::AccountId>), Self::Error> {
-		messages::source::verify_messages_delivery_proof::<WithMillauMessageBridge, Runtime, crate::MillauGrandpaInstance>(
-			proof,
-		)
+		messages::source::verify_messages_delivery_proof::<
+			WithMillauMessageBridge,
+			Runtime,
+			crate::MillauGrandpaInstance,
+		>(proof)
 	}
 }
 
@@ -251,10 +263,11 @@ impl SourceHeaderChain<bp_millau::Balance> for Millau {
 		proof: Self::MessagesProof,
 		messages_count: u32,
 	) -> Result<ProvedMessages<Message<bp_millau::Balance>>, Self::Error> {
-		messages::target::verify_messages_proof::<WithMillauMessageBridge, Runtime, crate::MillauGrandpaInstance>(
-			proof,
-			messages_count,
-		)
+		messages::target::verify_messages_proof::<
+			WithMillauMessageBridge,
+			Runtime,
+			crate::MillauGrandpaInstance,
+		>(proof, messages_count)
 	}
 }
 
@@ -268,9 +281,8 @@ pub enum RialtoToMillauMessagesParameter {
 impl MessagesParameter for RialtoToMillauMessagesParameter {
 	fn save(&self) {
 		match *self {
-			RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => {
-				MillauToRialtoConversionRate::set(conversion_rate)
-			}
+			RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) =>
+				MillauToRialtoConversionRate::set(conversion_rate),
 		}
 	}
 }
@@ -285,7 +297,9 @@ mod tests {
 		MessageKey,
 	};
 	use bp_runtime::{derive_account_id, messages::DispatchFeePayment, SourceAccount};
-	use bridge_runtime_common::messages::target::{FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload};
+	use bridge_runtime_common::messages::target::{
+		FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload,
+	};
 	use frame_support::{
 		traits::Currency,
 		weights::{GetDispatchInfo, WeightToFeePolynomial},
@@ -297,12 +311,15 @@ mod tests {
 		// this test actually belongs to the `bridge-runtime-common` crate, but there we have no
 		// mock runtime. Making another one there just for this test, given that both crates
 		// live n single repo is an overkill
-		let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::<Runtime>().unwrap().into();
+		let mut ext: sp_io::TestExternalities =
+			SystemConfig::default().build_storage::<Runtime>().unwrap().into();
 		ext.execute_with(|| {
 			let bridge = MILLAU_CHAIN_ID;
 			let call: Call = SystemCall::remark(vec![]).into();
 			let dispatch_weight = call.get_dispatch_info().weight;
-			let dispatch_fee = <Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(&dispatch_weight);
+			let dispatch_fee = <Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(
+				&dispatch_weight,
+			);
 			assert!(dispatch_fee > 0);
 
 			// create relayer account with minimal balance
@@ -314,12 +331,13 @@ mod tests {
 			);
 
 			// create dispatch account with minimal balance + dispatch fee
-			let dispatch_account = derive_account_id::<<Runtime as pallet_bridge_dispatch::Config>::SourceChainAccountId>(
-				bridge,
-				SourceAccount::Root,
-			);
+			let dispatch_account = derive_account_id::<
+				<Runtime as pallet_bridge_dispatch::Config>::SourceChainAccountId,
+			>(bridge, SourceAccount::Root);
 			let dispatch_account =
-				<Runtime as pallet_bridge_dispatch::Config>::AccountIdConverter::convert(dispatch_account);
+				<Runtime as pallet_bridge_dispatch::Config>::AccountIdConverter::convert(
+					dispatch_account,
+				);
 			let _ = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
 				&dispatch_account,
 				initial_amount + dispatch_fee,
@@ -329,10 +347,7 @@ mod tests {
 			FromMillauMessageDispatch::dispatch(
 				&relayer_account,
 				DispatchMessage {
-					key: MessageKey {
-						lane_id: Default::default(),
-						nonce: 0,
-					},
+					key: MessageKey { lane_id: Default::default(), nonce: 0 },
 					data: DispatchMessageData {
 						payload: Ok(FromBridgedChainMessagePayload::<WithMillauMessageBridge> {
 							spec_version: VERSION.spec_version,
@@ -348,11 +363,15 @@ mod tests {
 
 			// ensure that fee has been transferred from dispatch to relayer account
 			assert_eq!(
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&relayer_account),
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&relayer_account
+				),
 				initial_amount + dispatch_fee,
 			);
 			assert_eq!(
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&dispatch_account),
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&dispatch_account
+				),
 				initial_amount,
 			);
 		});
diff --git a/bridges/bin/rialto/runtime/src/parachains.rs b/bridges/bin/rialto/runtime/src/parachains.rs
index ba7b01ea116e8cc701076c5785591b5589723527..20ce9c29ef2d1909637fd48fb3538d1698e956cf 100644
--- a/bridges/bin/rialto/runtime/src/parachains.rs
+++ b/bridges/bin/rialto/runtime/src/parachains.rs
@@ -17,25 +17,21 @@
 //! Parachains support in Rialto runtime.
 
 use crate::{
-	AccountId, Balance, Balances, BlockNumber, Event, Origin, RandomnessCollectiveFlip, Registrar, Runtime, Slots,
+	AccountId, Balance, Balances, BlockNumber, Event, Origin, RandomnessCollectiveFlip, Registrar,
+	Runtime, Slots,
 };
 
 use frame_support::{parameter_types, weights::Weight};
 use frame_system::EnsureRoot;
 use polkadot_primitives::v1::ValidatorIndex;
 use polkadot_runtime_common::{paras_registrar, paras_sudo_wrapper, slots};
-use polkadot_runtime_parachains::configuration as parachains_configuration;
-use polkadot_runtime_parachains::dmp as parachains_dmp;
-use polkadot_runtime_parachains::hrmp as parachains_hrmp;
-use polkadot_runtime_parachains::inclusion as parachains_inclusion;
-use polkadot_runtime_parachains::initializer as parachains_initializer;
-use polkadot_runtime_parachains::origin as parachains_origin;
-use polkadot_runtime_parachains::paras as parachains_paras;
-use polkadot_runtime_parachains::paras_inherent as parachains_paras_inherent;
-use polkadot_runtime_parachains::scheduler as parachains_scheduler;
-use polkadot_runtime_parachains::session_info as parachains_session_info;
-use polkadot_runtime_parachains::shared as parachains_shared;
-use polkadot_runtime_parachains::ump as parachains_ump;
+use polkadot_runtime_parachains::{
+	configuration as parachains_configuration, dmp as parachains_dmp, hrmp as parachains_hrmp,
+	inclusion as parachains_inclusion, initializer as parachains_initializer,
+	origin as parachains_origin, paras as parachains_paras,
+	paras_inherent as parachains_paras_inherent, scheduler as parachains_scheduler,
+	session_info as parachains_session_info, shared as parachains_shared, ump as parachains_ump,
+};
 
 /// Special `RewardValidators` that does nothing ;)
 pub struct RewardValidators;
diff --git a/bridges/bin/rialto/runtime/src/rialto_poa.rs b/bridges/bin/rialto/runtime/src/rialto_poa.rs
index 9bc74a2ebaac65fca6053dd357e7a6a25144e5dc..865ef387d1b43d6c2bc86188c0200a4b85cdefb3 100644
--- a/bridges/bin/rialto/runtime/src/rialto_poa.rs
+++ b/bridges/bin/rialto/runtime/src/rialto_poa.rs
@@ -23,8 +23,8 @@ use bp_header_chain::InclusionProofVerifier;
 use frame_support::RuntimeDebug;
 use hex_literal::hex;
 use pallet_bridge_eth_poa::{
-	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, ValidatorsConfiguration,
-	ValidatorsSource,
+	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy,
+	ValidatorsConfiguration, ValidatorsSource,
 };
 use sp_std::prelude::*;
 
@@ -79,11 +79,14 @@ pub fn genesis_header() -> AuraHeader {
 		timestamp: 0,
 		number: 0,
 		author: Default::default(),
-		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
-		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(),
+		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
+		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
+			.into(),
 		extra_data: vec![],
 		state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(),
-		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
+		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
 		log_bloom: Default::default(),
 		gas_used: Default::default(),
 		gas_limit: 0x222222.into(),
@@ -128,12 +131,17 @@ impl InclusionProofVerifier for RialtoBlockchain {
 	type Transaction = RawTransaction;
 	type TransactionInclusionProof = EthereumTransactionInclusionProof;
 
-	fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<Self::Transaction> {
-		let is_transaction_finalized =
-			crate::BridgeRialtoPoa::verify_transaction_finalized(proof.block, proof.index, &proof.proof);
+	fn verify_transaction_inclusion_proof(
+		proof: &Self::TransactionInclusionProof,
+	) -> Option<Self::Transaction> {
+		let is_transaction_finalized = crate::BridgeRialtoPoa::verify_transaction_finalized(
+			proof.block,
+			proof.index,
+			&proof.proof,
+		);
 
 		if !is_transaction_finalized {
-			return None;
+			return None
 		}
 
 		proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone())
diff --git a/bridges/bin/runtime-common/src/messages.rs b/bridges/bin/runtime-common/src/messages.rs
index 3286d364c7e566fa1f486297e38047dd01ecbd19..ecca6e823c699e63addaa3300d339fb965601c41 100644
--- a/bridges/bin/runtime-common/src/messages.rs
+++ b/bridges/bin/runtime-common/src/messages.rs
@@ -41,7 +41,10 @@ use sp_runtime::{
 	traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul, Saturating, Zero},
 	FixedPointNumber, FixedPointOperand, FixedU128,
 };
-use sp_std::{cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec};
+use sp_std::{
+	cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive,
+	vec::Vec,
+};
 use sp_trie::StorageProof;
 
 /// Bidirectional message bridge.
@@ -64,7 +67,9 @@ pub trait MessageBridge {
 	type BridgedChain: BridgedChainWithMessages;
 
 	/// Convert Bridged chain balance into This chain balance.
-	fn bridged_balance_to_this_balance(bridged_balance: BalanceOf<BridgedChain<Self>>) -> BalanceOf<ThisChain<Self>>;
+	fn bridged_balance_to_this_balance(
+		bridged_balance: BalanceOf<BridgedChain<Self>>,
+	) -> BalanceOf<ThisChain<Self>>;
 }
 
 /// Chain that has `pallet-bridge-messages` and `dispatch` modules.
@@ -83,7 +88,14 @@ pub trait ChainWithMessages {
 	/// different weights.
 	type Weight: From<frame_support::weights::Weight> + PartialOrd;
 	/// Type of balances that is used on the chain.
-	type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From<u32> + Copy;
+	type Balance: Encode
+		+ Decode
+		+ CheckedAdd
+		+ CheckedDiv
+		+ CheckedMul
+		+ PartialOrd
+		+ From<u32>
+		+ Copy;
 }
 
 /// Message related transaction parameters estimation.
@@ -138,7 +150,8 @@ pub trait BridgedChainWithMessages: ChainWithMessages {
 		message_dispatch_weight: WeightOf<Self>,
 	) -> MessageTransaction<WeightOf<Self>>;
 
-	/// Returns minimal transaction fee that must be paid for given transaction at the Bridged chain.
+	/// Returns the minimal transaction fee that must be paid for a given transaction at the Bridged
+	/// chain.
 	fn transaction_payment(transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self>;
 }
 
@@ -158,10 +171,11 @@ type RawStorageProof = Vec<Vec<u8>>;
 
 /// Compute fee of transaction at runtime where regular transaction payment pallet is being used.
 ///
-/// The value of `multiplier` parameter is the expected value of `pallet_transaction_payment::NextFeeMultiplier`
-/// at the moment when transaction is submitted. If you're charging this payment in advance (and that's what
-/// happens with delivery and confirmation transaction in this crate), then there's a chance that the actual
-/// fee will be larger than what is paid in advance. So the value must be chosen carefully.
+/// The value of `multiplier` parameter is the expected value of
+/// `pallet_transaction_payment::NextFeeMultiplier` at the moment when transaction is submitted. If
+/// you're charging this payment in advance (and that's what happens with delivery and confirmation
+/// transaction in this crate), then there's a chance that the actual fee will be larger than what
+/// is paid in advance. So the value must be chosen carefully.
 pub fn transaction_payment<Balance: AtLeast32BitUnsigned + FixedPointOperand>(
 	base_extrinsic_weight: Weight,
 	per_byte_fee: Balance,
@@ -224,7 +238,8 @@ pub mod source {
 	}
 
 	/// 'Parsed' message delivery proof - inbound lane id and its state.
-	pub type ParsedMessagesDeliveryProofFromBridgedChain<B> = (LaneId, InboundLaneData<AccountIdOf<ThisChain<B>>>);
+	pub type ParsedMessagesDeliveryProofFromBridgedChain<B> =
+		(LaneId, InboundLaneData<AccountIdOf<ThisChain<B>>>);
 
 	/// Message verifier that is doing all basic checks.
 	///
@@ -236,19 +251,27 @@ pub mod source {
 	/// Following checks are made:
 	///
 	/// - message is rejected if its lane is currently blocked;
-	/// - message is rejected if there are too many pending (undelivered) messages at the outbound lane;
-	/// - check that the sender has rights to dispatch the call on target chain using provided dispatch origin;
+	/// - message is rejected if there are too many pending (undelivered) messages at the outbound
+	///   lane;
+	/// - check that the sender has rights to dispatch the call on target chain using provided
+	///   dispatch origin;
 	/// - check that the sender has paid enough funds for both message delivery and dispatch.
 	#[derive(RuntimeDebug)]
 	pub struct FromThisChainMessageVerifier<B>(PhantomData<B>);
 
 	pub(crate) const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled.";
 	pub(crate) const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane.";
-	pub(crate) const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin.";
-	pub(crate) const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane.";
+	pub(crate) const BAD_ORIGIN: &str =
+		"Unable to match the source origin to expected target origin.";
+	pub(crate) const TOO_LOW_FEE: &str =
+		"Provided fee is below minimal threshold required by the lane.";
 
-	impl<B> LaneMessageVerifier<AccountIdOf<ThisChain<B>>, FromThisChainMessagePayload<B>, BalanceOf<ThisChain<B>>>
-		for FromThisChainMessageVerifier<B>
+	impl<B>
+		LaneMessageVerifier<
+			AccountIdOf<ThisChain<B>>,
+			FromThisChainMessagePayload<B>,
+			BalanceOf<ThisChain<B>>,
+		> for FromThisChainMessageVerifier<B>
 	where
 		B: MessageBridge,
 		AccountIdOf<ThisChain<B>>: PartialEq + Clone,
@@ -264,7 +287,7 @@ pub mod source {
 		) -> Result<(), Self::Error> {
 			// reject message if lane is blocked
 			if !ThisChain::<B>::is_outbound_lane_enabled(lane) {
-				return Err(OUTBOUND_LANE_DISABLED);
+				return Err(OUTBOUND_LANE_DISABLED)
 			}
 
 			// reject message if there are too many pending messages at this lane
@@ -273,19 +296,20 @@ pub mod source {
 				.latest_generated_nonce
 				.saturating_sub(lane_outbound_data.latest_received_nonce);
 			if pending_messages > max_pending_messages {
-				return Err(TOO_MANY_PENDING_MESSAGES);
+				return Err(TOO_MANY_PENDING_MESSAGES)
 			}
 
 			// Do the dispatch-specific check. We assume that the target chain uses
 			// `Dispatch`, so we verify the message accordingly.
-			pallet_bridge_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?;
+			pallet_bridge_dispatch::verify_message_origin(submitter, payload)
+				.map_err(|_| BAD_ORIGIN)?;
 
 			let minimal_fee_in_this_tokens =
 				estimate_message_dispatch_and_delivery_fee::<B>(payload, B::RELAYER_FEE_PERCENT)?;
 
 			// compare with actual fee paid
 			if *delivery_and_dispatch_fee < minimal_fee_in_this_tokens {
-				return Err(TOO_LOW_FEE);
+				return Err(TOO_LOW_FEE)
 			}
 
 			Ok(())
@@ -307,13 +331,13 @@ pub mod source {
 	) -> Result<(), &'static str> {
 		let weight_limits = BridgedChain::<B>::message_weight_limits(&payload.call);
 		if !weight_limits.contains(&payload.weight.into()) {
-			return Err("Incorrect message weight declared");
+			return Err("Incorrect message weight declared")
 		}
 
 		// The maximal size of extrinsic at Substrate-based chain depends on the
-		// `frame_system::Config::MaximumBlockLength` and `frame_system::Config::AvailableBlockRatio`
-		// constants. This check is here to be sure that the lane won't stuck because message is too
-		// large to fit into delivery transaction.
+		// `frame_system::Config::MaximumBlockLength` and
+		// `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that
+		// the lane won't get stuck because the message is too large to fit into delivery transaction.
 		//
 		// **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not
 		// the message itself. The proof is always larger than the message. But unless chain state
@@ -321,16 +345,17 @@ pub mod source {
 		// transaction also contains signatures and signed extensions. Because of this, we reserve
 		// 1/3 of the the maximal extrinsic weight for this data.
 		if payload.call.len() > maximal_message_size::<B>() as usize {
-			return Err("The message is too large to be sent over the lane");
+			return Err("The message is too large to be sent over the lane")
 		}
 
 		Ok(())
 	}
 
-	/// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged chain.
+	/// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged
+	/// chain.
 	///
-	/// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional conversions.
-	/// Returns `None` if overflow has happened.
+	/// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional
+	/// conversions. Returns `None` if overflow has happened.
 	pub fn estimate_message_dispatch_and_delivery_fee<B: MessageBridge>(
 		payload: &FromThisChainMessagePayload<B>,
 		relayer_fee_percent: u32,
@@ -339,25 +364,23 @@ pub mod source {
 		//
 		// if we're going to pay dispatch fee at the target chain, then we don't include weight
 		// of the message dispatch in the delivery transaction cost
-		let pay_dispatch_fee_at_target_chain = payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
+		let pay_dispatch_fee_at_target_chain =
+			payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
 		let delivery_transaction = BridgedChain::<B>::estimate_delivery_transaction(
 			&payload.encode(),
 			pay_dispatch_fee_at_target_chain,
-			if pay_dispatch_fee_at_target_chain {
-				0.into()
-			} else {
-				payload.weight.into()
-			},
+			if pay_dispatch_fee_at_target_chain { 0.into() } else { payload.weight.into() },
 		);
 		let delivery_transaction_fee = BridgedChain::<B>::transaction_payment(delivery_transaction);
 
 		// the fee (in This tokens) of all transactions that are made on This chain
 		let confirmation_transaction = ThisChain::<B>::estimate_delivery_confirmation_transaction();
-		let confirmation_transaction_fee = ThisChain::<B>::transaction_payment(confirmation_transaction);
+		let confirmation_transaction_fee =
+			ThisChain::<B>::transaction_payment(confirmation_transaction);
 
 		// minimal fee (in This tokens) is a sum of all required fees
-		let minimal_fee =
-			B::bridged_balance_to_this_balance(delivery_transaction_fee).checked_add(&confirmation_transaction_fee);
+		let minimal_fee = B::bridged_balance_to_this_balance(delivery_transaction_fee)
+			.checked_add(&confirmation_transaction_fee);
 
 		// before returning, add extra fee that is paid to the relayer (relayer interest)
 		minimal_fee
@@ -378,14 +401,14 @@ pub mod source {
 	) -> Result<ParsedMessagesDeliveryProofFromBridgedChain<B>, &'static str>
 	where
 		ThisRuntime: pallet_bridge_grandpa::Config<GrandpaInstance>,
-		HashOf<BridgedChain<B>>:
-			Into<bp_runtime::HashOf<<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain>>,
+		HashOf<BridgedChain<B>>: Into<
+			bp_runtime::HashOf<
+				<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain,
+			>,
+		>,
 	{
-		let FromBridgedChainMessagesDeliveryProof {
-			bridged_header_hash,
-			storage_proof,
-			lane,
-		} = proof;
+		let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } =
+			proof;
 		pallet_bridge_grandpa::Pallet::<ThisRuntime, GrandpaInstance>::parse_finalized_storage_proof(
 			bridged_header_hash.into(),
 			StorageProof::new(storage_proof),
@@ -470,14 +493,13 @@ pub mod target {
 	impl<DecodedCall> FromBridgedChainEncodedMessageCall<DecodedCall> {
 		/// Create encoded call.
 		pub fn new(encoded_call: Vec<u8>) -> Self {
-			FromBridgedChainEncodedMessageCall {
-				encoded_call,
-				_marker: PhantomData::default(),
-			}
+			FromBridgedChainEncodedMessageCall { encoded_call, _marker: PhantomData::default() }
 		}
 	}
 
-	impl<DecodedCall: Decode> From<FromBridgedChainEncodedMessageCall<DecodedCall>> for Result<DecodedCall, ()> {
+	impl<DecodedCall: Decode> From<FromBridgedChainEncodedMessageCall<DecodedCall>>
+		for Result<DecodedCall, ()>
+	{
 		fn from(encoded_call: FromBridgedChainEncodedMessageCall<DecodedCall>) -> Self {
 			DecodedCall::decode(&mut &encoded_call.encoded_call[..]).map_err(drop)
 		}
@@ -495,16 +517,22 @@ pub mod target {
 	where
 		BalanceOf<ThisChain<B>>: Saturating + FixedPointOperand,
 		ThisDispatchInstance: 'static,
-		ThisRuntime: pallet_bridge_dispatch::Config<ThisDispatchInstance, BridgeMessageId = (LaneId, MessageNonce)>
-			+ pallet_transaction_payment::Config,
+		ThisRuntime: pallet_bridge_dispatch::Config<
+				ThisDispatchInstance,
+				BridgeMessageId = (LaneId, MessageNonce),
+			> + pallet_transaction_payment::Config,
 		<ThisRuntime as pallet_transaction_payment::Config>::OnChargeTransaction:
-			pallet_transaction_payment::OnChargeTransaction<ThisRuntime, Balance = BalanceOf<ThisChain<B>>>,
+			pallet_transaction_payment::OnChargeTransaction<
+				ThisRuntime,
+				Balance = BalanceOf<ThisChain<B>>,
+			>,
 		ThisCurrency: Currency<AccountIdOf<ThisChain<B>>, Balance = BalanceOf<ThisChain<B>>>,
-		pallet_bridge_dispatch::Pallet<ThisRuntime, ThisDispatchInstance>: bp_message_dispatch::MessageDispatch<
-			AccountIdOf<ThisChain<B>>,
-			(LaneId, MessageNonce),
-			Message = FromBridgedChainMessagePayload<B>,
-		>,
+		pallet_bridge_dispatch::Pallet<ThisRuntime, ThisDispatchInstance>:
+			bp_message_dispatch::MessageDispatch<
+				AccountIdOf<ThisChain<B>>,
+				(LaneId, MessageNonce),
+				Message = FromBridgedChainMessagePayload<B>,
+			>,
 	{
 		type DispatchPayload = FromBridgedChainMessagePayload<B>;
 
@@ -526,8 +554,10 @@ pub mod target {
 				message.data.payload.map_err(drop),
 				|dispatch_origin, dispatch_weight| {
 					let unadjusted_weight_fee = ThisRuntime::WeightToFee::calc(&dispatch_weight);
-					let fee_multiplier = pallet_transaction_payment::Pallet::<ThisRuntime>::next_fee_multiplier();
-					let adjusted_weight_fee = fee_multiplier.saturating_mul_int(unadjusted_weight_fee);
+					let fee_multiplier =
+						pallet_transaction_payment::Pallet::<ThisRuntime>::next_fee_multiplier();
+					let adjusted_weight_fee =
+						fee_multiplier.saturating_mul_int(unadjusted_weight_fee);
 					if !adjusted_weight_fee.is_zero() {
 						ThisCurrency::transfer(
 							dispatch_origin,
@@ -565,8 +595,11 @@ pub mod target {
 	) -> Result<ProvedMessages<Message<BalanceOf<BridgedChain<B>>>>, &'static str>
 	where
 		ThisRuntime: pallet_bridge_grandpa::Config<GrandpaInstance>,
-		HashOf<BridgedChain<B>>:
-			Into<bp_runtime::HashOf<<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain>>,
+		HashOf<BridgedChain<B>>: Into<
+			bp_runtime::HashOf<
+				<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain,
+			>,
+		>,
 	{
 		verify_messages_proof_with_parser::<B, _, _>(
 			proof,
@@ -601,12 +634,13 @@ pub mod target {
 		fn from(err: MessageProofError) -> &'static str {
 			match err {
 				MessageProofError::Empty => "Messages proof is empty",
-				MessageProofError::MessagesCountMismatch => "Declared messages count doesn't match actual value",
+				MessageProofError::MessagesCountMismatch =>
+					"Declared messages count doesn't match actual value",
 				MessageProofError::MissingRequiredMessage => "Message is missing from the proof",
-				MessageProofError::FailedToDecodeMessage => "Failed to decode message from the proof",
-				MessageProofError::FailedToDecodeOutboundLaneState => {
-					"Failed to decode outbound lane data from the proof"
-				}
+				MessageProofError::FailedToDecodeMessage =>
+					"Failed to decode message from the proof",
+				MessageProofError::FailedToDecodeOutboundLaneState =>
+					"Failed to decode outbound lane data from the proof",
 				MessageProofError::Custom(err) => err,
 			}
 		}
@@ -629,10 +663,11 @@ pub mod target {
 	{
 		fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option<Vec<u8>> {
 			let storage_outbound_lane_data_key =
-				pallet_bridge_messages::storage_keys::outbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, lane_id);
-			self.storage
-				.read_value(storage_outbound_lane_data_key.0.as_ref())
-				.ok()?
+				pallet_bridge_messages::storage_keys::outbound_lane_data_key(
+					B::BRIDGED_MESSAGES_PALLET_NAME,
+					lane_id,
+				);
+			self.storage.read_value(storage_outbound_lane_data_key.0.as_ref()).ok()?
 		}
 
 		fn read_raw_message(&self, message_key: &MessageKey) -> Option<Vec<u8>> {
@@ -652,7 +687,8 @@ pub mod target {
 		build_parser: BuildParser,
 	) -> Result<ProvedMessages<Message<BalanceOf<BridgedChain<B>>>>, MessageProofError>
 	where
-		BuildParser: FnOnce(HashOf<BridgedChain<B>>, RawStorageProof) -> Result<Parser, MessageProofError>,
+		BuildParser:
+			FnOnce(HashOf<BridgedChain<B>>, RawStorageProof) -> Result<Parser, MessageProofError>,
 		Parser: MessageProofParser,
 	{
 		let FromBridgedChainMessagesProof {
@@ -664,18 +700,19 @@ pub mod target {
 		} = proof;
 
 		// receiving proofs where end < begin is ok (if proof includes outbound lane state)
-		let messages_in_the_proof = if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) {
-			// let's check that the user (relayer) has passed correct `messages_count`
-			// (this bounds maximal capacity of messages vec below)
-			let messages_in_the_proof = nonces_difference.saturating_add(1);
-			if messages_in_the_proof != MessageNonce::from(messages_count) {
-				return Err(MessageProofError::MessagesCountMismatch);
-			}
+		let messages_in_the_proof =
+			if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) {
+				// let's check that the user (relayer) has passed correct `messages_count`
+				// (this bounds maximal capacity of messages vec below)
+				let messages_in_the_proof = nonces_difference.saturating_add(1);
+				if messages_in_the_proof != MessageNonce::from(messages_count) {
+					return Err(MessageProofError::MessagesCountMismatch)
+				}
 
-			messages_in_the_proof
-		} else {
-			0
-		};
+				messages_in_the_proof
+			} else {
+				0
+			};
 
 		let parser = build_parser(bridged_header_hash, storage_proof)?;
 
@@ -689,20 +726,15 @@ pub mod target {
 			let raw_message_data = parser
 				.read_raw_message(&message_key)
 				.ok_or(MessageProofError::MissingRequiredMessage)?;
-			let message_data = MessageData::<BalanceOf<BridgedChain<B>>>::decode(&mut &raw_message_data[..])
-				.map_err(|_| MessageProofError::FailedToDecodeMessage)?;
-			messages.push(Message {
-				key: message_key,
-				data: message_data,
-			});
+			let message_data =
+				MessageData::<BalanceOf<BridgedChain<B>>>::decode(&mut &raw_message_data[..])
+					.map_err(|_| MessageProofError::FailedToDecodeMessage)?;
+			messages.push(Message { key: message_key, data: message_data });
 		}
 
 		// Now let's check if proof contains outbound lane state proof. It is optional, so we
 		// simply ignore `read_value` errors and missing value.
-		let mut proved_lane_messages = ProvedLaneMessages {
-			lane_state: None,
-			messages,
-		};
+		let mut proved_lane_messages = ProvedLaneMessages { lane_state: None, messages };
 		let raw_outbound_lane_data = parser.read_raw_outbound_lane_data(&lane);
 		if let Some(raw_outbound_lane_data) = raw_outbound_lane_data {
 			proved_lane_messages.lane_state = Some(
@@ -713,7 +745,7 @@ pub mod target {
 
 		// Now we may actually check if the proof is empty or not.
 		if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() {
-			return Err(MessageProofError::Empty);
+			return Err(MessageProofError::Empty)
 		}
 
 		// We only support single lane messages in this schema
@@ -739,7 +771,8 @@ mod tests {
 	const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: Weight = 2048;
 	const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024;
 
-	/// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from BridgedChain;
+	/// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from
+	/// BridgedChain.
 	#[derive(Debug, PartialEq, Eq)]
 	struct OnThisChainBridge;
 
@@ -752,12 +785,15 @@ mod tests {
 		type ThisChain = ThisChain;
 		type BridgedChain = BridgedChain;
 
-		fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance {
+		fn bridged_balance_to_this_balance(
+			bridged_balance: BridgedChainBalance,
+		) -> ThisChainBalance {
 			ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32)
 		}
 	}
 
-	/// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from ThisChain;
+	/// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from
+	/// ThisChain.
 	#[derive(Debug, PartialEq, Eq)]
 	struct OnBridgedChainBridge;
 
@@ -892,7 +928,9 @@ mod tests {
 		}
 
 		fn transaction_payment(transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
-			ThisChainBalance(transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32)
+			ThisChainBalance(
+				transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32,
+			)
 		}
 	}
 
@@ -913,7 +951,9 @@ mod tests {
 			unreachable!()
 		}
 
-		fn transaction_payment(_transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
+		fn transaction_payment(
+			_transaction: MessageTransaction<WeightOf<Self>>,
+		) -> BalanceOf<Self> {
 			unreachable!()
 		}
 	}
@@ -944,7 +984,9 @@ mod tests {
 			unreachable!()
 		}
 
-		fn transaction_payment(_transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
+		fn transaction_payment(
+			_transaction: MessageTransaction<WeightOf<Self>>,
+		) -> BalanceOf<Self> {
 			unreachable!()
 		}
 	}
@@ -955,7 +997,8 @@ mod tests {
 		}
 
 		fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive<Self::Weight> {
-			let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight);
+			let begin =
+				std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight);
 			begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT
 		}
 
@@ -971,7 +1014,9 @@ mod tests {
 		}
 
 		fn transaction_payment(transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
-			BridgedChainBalance(transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32)
+			BridgedChainBalance(
+				transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32,
+			)
 		}
 	}
 
@@ -982,19 +1027,22 @@ mod tests {
 	#[test]
 	fn message_from_bridged_chain_is_decoded() {
 		// the message is encoded on the bridged chain
-		let message_on_bridged_chain = source::FromThisChainMessagePayload::<OnBridgedChainBridge> {
-			spec_version: 1,
-			weight: 100,
-			origin: bp_message_dispatch::CallOrigin::SourceRoot,
-			dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
-			call: ThisChainCall::Transfer.encode(),
-		}
-		.encode();
+		let message_on_bridged_chain =
+			source::FromThisChainMessagePayload::<OnBridgedChainBridge> {
+				spec_version: 1,
+				weight: 100,
+				origin: bp_message_dispatch::CallOrigin::SourceRoot,
+				dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
+				call: ThisChainCall::Transfer.encode(),
+			}
+			.encode();
 
 		// and sent to this chain where it is decoded
 		let message_on_this_chain =
-			target::FromBridgedChainMessagePayload::<OnThisChainBridge>::decode(&mut &message_on_bridged_chain[..])
-				.unwrap();
+			target::FromBridgedChainMessagePayload::<OnThisChainBridge>::decode(
+				&mut &message_on_bridged_chain[..],
+			)
+			.unwrap();
 		assert_eq!(
 			message_on_this_chain,
 			target::FromBridgedChainMessagePayload::<OnThisChainBridge> {
@@ -1013,7 +1061,8 @@ mod tests {
 	const TEST_LANE_ID: &LaneId = b"test";
 	const MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE: MessageNonce = 32;
 
-	fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload<OnThisChainBridge> {
+	fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload<OnThisChainBridge>
+	{
 		source::FromThisChainMessagePayload::<OnThisChainBridge> {
 			spec_version: 1,
 			weight: 100,
@@ -1042,11 +1091,14 @@ mod tests {
 		// let's check if estimation is less than hardcoded, if dispatch is paid at target chain
 		let mut payload_with_pay_on_target = regular_outbound_message_payload();
 		payload_with_pay_on_target.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
-		let fee_at_source = source::estimate_message_dispatch_and_delivery_fee::<OnThisChainBridge>(
-			&payload_with_pay_on_target,
-			OnThisChainBridge::RELAYER_FEE_PERCENT,
-		)
-		.expect("estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message");
+		let fee_at_source =
+			source::estimate_message_dispatch_and_delivery_fee::<OnThisChainBridge>(
+				&payload_with_pay_on_target,
+				OnThisChainBridge::RELAYER_FEE_PERCENT,
+			)
+			.expect(
+				"estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message",
+			);
 		assert!(
 			fee_at_source < EXPECTED_MINIMAL_FEE.into(),
 			"Computed fee {:?} without prepaid dispatch must be less than the fee with prepaid dispatch {}",
@@ -1065,16 +1117,14 @@ mod tests {
 			),
 			Err(source::TOO_LOW_FEE)
 		);
-		assert!(
-			source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
-				&Sender::Root,
-				&ThisChainBalance(1_000_000),
-				TEST_LANE_ID,
-				&test_lane_outbound_data(),
-				&payload,
-			)
-			.is_ok(),
-		);
+		assert!(source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
+			&Sender::Root,
+			&ThisChainBalance(1_000_000),
+			TEST_LANE_ID,
+			&test_lane_outbound_data(),
+			&payload,
+		)
+		.is_ok(),);
 	}
 
 	#[test]
@@ -1109,16 +1159,14 @@ mod tests {
 			),
 			Err(source::BAD_ORIGIN)
 		);
-		assert!(
-			source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
-				&Sender::Root,
-				&ThisChainBalance(1_000_000),
-				TEST_LANE_ID,
-				&test_lane_outbound_data(),
-				&payload,
-			)
-			.is_ok(),
-		);
+		assert!(source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
+			&Sender::Root,
+			&ThisChainBalance(1_000_000),
+			TEST_LANE_ID,
+			&test_lane_outbound_data(),
+			&payload,
+		)
+		.is_ok(),);
 	}
 
 	#[test]
@@ -1143,16 +1191,14 @@ mod tests {
 			),
 			Err(source::BAD_ORIGIN)
 		);
-		assert!(
-			source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
-				&Sender::Signed(ThisChainAccountId(1)),
-				&ThisChainBalance(1_000_000),
-				TEST_LANE_ID,
-				&test_lane_outbound_data(),
-				&payload,
-			)
-			.is_ok(),
-		);
+		assert!(source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
+			&Sender::Signed(ThisChainAccountId(1)),
+			&ThisChainBalance(1_000_000),
+			TEST_LANE_ID,
+			&test_lane_outbound_data(),
+			&payload,
+		)
+		.is_ok(),);
 	}
 
 	#[test]
@@ -1189,64 +1235,58 @@ mod tests {
 
 	#[test]
 	fn verify_chain_message_rejects_message_with_too_small_declared_weight() {
-		assert!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
+		assert!(source::verify_chain_message::<OnThisChainBridge>(
+			&source::FromThisChainMessagePayload::<OnThisChainBridge> {
 				spec_version: 1,
 				weight: 5,
 				origin: bp_message_dispatch::CallOrigin::SourceRoot,
 				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 				call: vec![1, 2, 3, 4, 5, 6],
-			},)
-			.is_err()
-		);
+			},
+		)
+		.is_err());
 	}
 
 	#[test]
 	fn verify_chain_message_rejects_message_with_too_large_declared_weight() {
-		assert!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
+		assert!(source::verify_chain_message::<OnThisChainBridge>(
+			&source::FromThisChainMessagePayload::<OnThisChainBridge> {
 				spec_version: 1,
 				weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1,
 				origin: bp_message_dispatch::CallOrigin::SourceRoot,
 				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 				call: vec![1, 2, 3, 4, 5, 6],
-			},)
-			.is_err()
-		);
+			},
+		)
+		.is_err());
 	}
 
 	#[test]
 	fn verify_chain_message_rejects_message_too_large_message() {
-		assert!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
+		assert!(source::verify_chain_message::<OnThisChainBridge>(
+			&source::FromThisChainMessagePayload::<OnThisChainBridge> {
 				spec_version: 1,
 				weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
 				origin: bp_message_dispatch::CallOrigin::SourceRoot,
 				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 				call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as usize + 1],
-			},)
-			.is_err()
-		);
+			},
+		)
+		.is_err());
 	}
 
 	#[test]
 	fn verify_chain_message_accepts_maximal_message() {
 		assert_eq!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
-				spec_version: 1,
-				weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
-				origin: bp_message_dispatch::CallOrigin::SourceRoot,
-				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
-				call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as _],
-			},),
+			source::verify_chain_message::<OnThisChainBridge>(
+				&source::FromThisChainMessagePayload::<OnThisChainBridge> {
+					spec_version: 1,
+					weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
+					origin: bp_message_dispatch::CallOrigin::SourceRoot,
+					dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
+					call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as _],
+				},
+			),
 			Ok(()),
 		);
 	}
@@ -1338,13 +1378,15 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_required_message_is_missing() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(10), 10, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(10),
+				10,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: 1..=5,
 					outbound_lane_data: None,
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::MissingRequiredMessage),
 		);
 	}
@@ -1352,13 +1394,15 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_message_decode_fails() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(10), 10, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(10),
+				10,
+				|_, _| Ok(TestMessageProofParser {
 					failing: true,
 					messages: 1..=10,
 					outbound_lane_data: None,
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::FailedToDecodeMessage),
 		);
 	}
@@ -1366,8 +1410,10 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(0), 0, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(0),
+				0,
+				|_, _| Ok(TestMessageProofParser {
 					failing: true,
 					messages: no_messages_range(),
 					outbound_lane_data: Some(OutboundLaneData {
@@ -1375,8 +1421,8 @@ mod tests {
 						latest_received_nonce: 1,
 						latest_generated_nonce: 1,
 					}),
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::FailedToDecodeOutboundLaneState),
 		);
 	}
@@ -1384,13 +1430,15 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_it_is_empty() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(0), 0, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(0),
+				0,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: no_messages_range(),
 					outbound_lane_data: None,
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::Empty),
 		);
 	}
@@ -1398,8 +1446,10 @@ mod tests {
 	#[test]
 	fn non_empty_message_proof_without_messages_is_accepted() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(0), 0, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(0),
+				0,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: no_messages_range(),
 					outbound_lane_data: Some(OutboundLaneData {
@@ -1407,8 +1457,8 @@ mod tests {
 						latest_received_nonce: 1,
 						latest_generated_nonce: 1,
 					}),
-				}
-			),),
+				}),
+			),
 			Ok(vec![(
 				Default::default(),
 				ProvedLaneMessages {
@@ -1428,8 +1478,10 @@ mod tests {
 	#[test]
 	fn non_empty_message_proof_is_accepted() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(1), 1, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(1),
+				1,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: 1..=1,
 					outbound_lane_data: Some(OutboundLaneData {
@@ -1437,8 +1489,8 @@ mod tests {
 						latest_received_nonce: 1,
 						latest_generated_nonce: 1,
 					}),
-				}
-			),),
+				}),
+			),
 			Ok(vec![(
 				Default::default(),
 				ProvedLaneMessages {
@@ -1448,14 +1500,8 @@ mod tests {
 						latest_generated_nonce: 1,
 					}),
 					messages: vec![Message {
-						key: MessageKey {
-							lane_id: Default::default(),
-							nonce: 1
-						},
-						data: MessageData {
-							payload: 1u64.encode(),
-							fee: BridgedChainBalance(0)
-						},
+						key: MessageKey { lane_id: Default::default(), nonce: 1 },
+						data: MessageData { payload: 1u64.encode(), fee: BridgedChainBalance(0) },
 					}],
 				},
 			)]
@@ -1494,10 +1540,7 @@ mod tests {
 				10,
 				FixedU128::zero(),
 				|weight| weight,
-				MessageTransaction {
-					size: 50,
-					dispatch_weight: 777
-				},
+				MessageTransaction { size: 50, dispatch_weight: 777 },
 			),
 			100 + 50 * 10,
 		);
@@ -1513,10 +1556,7 @@ mod tests {
 				10,
 				FixedU128::one(),
 				|weight| weight,
-				MessageTransaction {
-					size: 50,
-					dispatch_weight: 777
-				},
+				MessageTransaction { size: 50, dispatch_weight: 777 },
 			),
 			100 + 50 * 10 + 777,
 		);
diff --git a/bridges/bin/runtime-common/src/messages_benchmarking.rs b/bridges/bin/runtime-common/src/messages_benchmarking.rs
index 3785f4a4607f0af545eca7583b86d3219c76541f..217560e114344c61e502d888d92f130e09732db2 100644
--- a/bridges/bin/runtime-common/src/messages_benchmarking.rs
+++ b/bridges/bin/runtime-common/src/messages_benchmarking.rs
@@ -20,8 +20,8 @@
 #![cfg(feature = "runtime-benchmarks")]
 
 use crate::messages::{
-	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, AccountIdOf, BalanceOf,
-	BridgedChain, HashOf, MessageBridge, ThisChain,
+	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof,
+	AccountIdOf, BalanceOf, BridgedChain, HashOf, MessageBridge, ThisChain,
 };
 
 use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload};
@@ -29,13 +29,16 @@ use bp_runtime::ChainId;
 use codec::Encode;
 use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH};
 use frame_support::weights::Weight;
-use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize};
+use pallet_bridge_messages::benchmarking::{
+	MessageDeliveryProofParams, MessageProofParams, ProofSize,
+};
 use sp_core::Hasher;
 use sp_runtime::traits::Header;
 use sp_std::prelude::*;
 use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder, TrieMut};
 
-/// Generate ed25519 signature to be used in `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`.
+/// Generate ed25519 signature to be used in
+/// `pallet_bridge_call_dispatch::CallOrigin::TargetAccount`.
 ///
 /// Returns public key of the signer and the signature itself.
 pub fn ed25519_sign(
@@ -47,8 +50,8 @@ pub fn ed25519_sign(
 ) -> ([u8; 32], [u8; 64]) {
 	// key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html)
 	let target_secret = SecretKey::from_bytes(&[
-		157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050,
-		105, 025, 112, 059, 172, 003, 028, 174, 127, 096,
+		157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073,
+		197, 105, 123, 050, 105, 025, 112, 059, 172, 003, 028, 174, 127, 096,
 	])
 	.expect("harcoded key is valid");
 	let target_public: PublicKey = (&target_secret).into();
@@ -56,7 +59,8 @@ pub fn ed25519_sign(
 	let mut target_pair_bytes = [0u8; KEYPAIR_LENGTH];
 	target_pair_bytes[..SECRET_KEY_LENGTH].copy_from_slice(&target_secret.to_bytes());
 	target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes());
-	let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid");
+	let target_pair =
+		ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid");
 
 	let signature_message = pallet_bridge_dispatch::account_ownership_digest(
 		target_call,
@@ -92,11 +96,8 @@ where
 	MH: Fn(H::Out) -> <R::BridgedChain as bp_runtime::Chain>::Header,
 {
 	// prepare Bridged chain storage with messages and (optionally) outbound lane state
-	let message_count = params
-		.message_nonces
-		.end()
-		.saturating_sub(*params.message_nonces.start())
-		+ 1;
+	let message_count =
+		params.message_nonces.end().saturating_sub(*params.message_nonces.start()) + 1;
 	let mut storage_keys = Vec::with_capacity(message_count as usize + 1);
 	let mut root = Default::default();
 	let mut mdb = MemoryDB::default();
@@ -105,10 +106,7 @@ where
 
 		// insert messages
 		for nonce in params.message_nonces.clone() {
-			let message_key = MessageKey {
-				lane_id: params.lane,
-				nonce,
-			};
+			let message_key = MessageKey { lane_id: params.lane, nonce };
 			let message_data = MessageData {
 				fee: BalanceOf::<BridgedChain<B>>::from(0),
 				payload: message_payload.clone(),
@@ -220,7 +218,7 @@ fn grow_trie<H: Hasher>(mut root: H::Out, mdb: &mut MemoryDB<H>, trie_size: Proo
 			.expect("record_all_keys should not fail in benchmarks");
 		let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum();
 		if size > minimal_trie_size as _ {
-			return root;
+			return root
 		}
 
 		let mut trie = TrieDBMut::<H>::from_existing(mdb, &mut root)
diff --git a/bridges/modules/currency-exchange/src/benchmarking.rs b/bridges/modules/currency-exchange/src/benchmarking.rs
index db8b2256c8074e1e16adf67d87919096657346e2..813c1bfe884ddd71760605372f54c6e99e5c916f 100644
--- a/bridges/modules/currency-exchange/src/benchmarking.rs
+++ b/bridges/modules/currency-exchange/src/benchmarking.rs
@@ -18,7 +18,10 @@
 //! So we are giving runtime opportunity to prepare environment and construct proof
 //! before invoking module calls.
 
-use super::{Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Pallet as CurrencyExchangePallet};
+use super::{
+	Call, Config as CurrencyExchangeConfig, InclusionProofVerifier,
+	Pallet as CurrencyExchangePallet,
+};
 use sp_std::prelude::*;
 
 use frame_benchmarking::{account, benchmarks_instance_pallet};
@@ -37,8 +40,8 @@ pub struct ProofParams<Recipient> {
 	pub recipient: Recipient,
 	/// When true, recipient must exists before import.
 	pub recipient_exists: bool,
-	/// When 0, transaction should have minimal possible size. When this value has non-zero value n,
-	/// transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR.
+	/// When 0, transaction should have minimal possible size. When this value has non-zero value
+	/// n, transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR.
 	pub transaction_size_factor: u32,
 	/// When 0, proof should have minimal possible size. When this value has non-zero value n,
 	/// proof size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR.
diff --git a/bridges/modules/currency-exchange/src/lib.rs b/bridges/modules/currency-exchange/src/lib.rs
index 550467f5d6fe7732feb2133a5870cc02bb6347cc..31b789dd97e739b9324ca16420a2c1dd30bf5b4b 100644
--- a/bridges/modules/currency-exchange/src/lib.rs
+++ b/bridges/modules/currency-exchange/src/lib.rs
@@ -19,7 +19,8 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use bp_currency_exchange::{
-	CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, RecipientsMap,
+	CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction,
+	RecipientsMap,
 };
 use bp_header_chain::InclusionProofVerifier;
 use frame_support::ensure;
@@ -92,7 +93,8 @@ pub mod pallet {
 			{
 				// if any changes were made to the storage, we can't just return error here, because
 				// otherwise the same proof may be imported again
-				let deposit_result = T::DepositInto::deposit_into(deposit.recipient, deposit.amount);
+				let deposit_result =
+					T::DepositInto::deposit_into(deposit.recipient, deposit.amount);
 				match deposit_result {
 					Ok(_) => (),
 					Err(ExchangeError::DepositPartiallyFailed) => (),
@@ -160,7 +162,7 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 				err,
 			);
 
-			return false;
+			return false
 		}
 
 		true
@@ -205,23 +207,16 @@ fn prepare_deposit_details<T: Config<I>, I: 'static>(
 		.ok_or(Error::<T, I>::UnfinalizedTransaction)?;
 
 	// parse transaction
-	let transaction =
-		<T as Config<I>>::PeerMaybeLockFundsTransaction::parse(&transaction).map_err(Error::<T, I>::from)?;
+	let transaction = <T as Config<I>>::PeerMaybeLockFundsTransaction::parse(&transaction)
+		.map_err(Error::<T, I>::from)?;
 	let transfer_id = transaction.id;
-	ensure!(
-		!Transfers::<T, I>::contains_key(&transfer_id),
-		Error::<T, I>::AlreadyClaimed
-	);
+	ensure!(!Transfers::<T, I>::contains_key(&transfer_id), Error::<T, I>::AlreadyClaimed);
 
 	// grant recipient
 	let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::<T, I>::from)?;
 	let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::<T, I>::from)?;
 
-	Ok(DepositDetails {
-		transfer_id,
-		recipient,
-		amount,
-	})
+	Ok(DepositDetails { transfer_id, recipient, amount })
 }
 
 #[cfg(test)]
@@ -231,7 +226,9 @@ mod tests {
 
 	use super::*;
 	use bp_currency_exchange::LockFundsTransaction;
-	use frame_support::{assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight};
+	use frame_support::{
+		assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight,
+	};
 	use sp_core::H256;
 	use sp_runtime::{
 		testing::Header,
@@ -264,7 +261,9 @@ mod tests {
 		type Transaction = RawTransaction;
 		type TransactionInclusionProof = (bool, RawTransaction);
 
-		fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<RawTransaction> {
+		fn verify_transaction_inclusion_proof(
+			proof: &Self::TransactionInclusionProof,
+		) -> Option<RawTransaction> {
 			if proof.0 {
 				Some(proof.1.clone())
 			} else {
@@ -295,7 +294,9 @@ mod tests {
 		type PeerRecipient = AccountId;
 		type Recipient = AccountId;
 
-		fn map(peer_recipient: Self::PeerRecipient) -> bp_currency_exchange::Result<Self::Recipient> {
+		fn map(
+			peer_recipient: Self::PeerRecipient,
+		) -> bp_currency_exchange::Result<Self::Recipient> {
 			match peer_recipient {
 				UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients),
 				_ => Ok(peer_recipient * 10),
@@ -323,10 +324,14 @@ mod tests {
 		type Recipient = AccountId;
 		type Amount = u64;
 
-		fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> {
+		fn deposit_into(
+			_recipient: Self::Recipient,
+			amount: Self::Amount,
+		) -> bp_currency_exchange::Result<()> {
 			match amount {
 				amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()),
-				amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed),
+				amount if amount == MAX_DEPOSIT_AMOUNT * 10 =>
+					Err(ExchangeError::DepositPartiallyFailed),
 				_ => Err(ExchangeError::DepositFailed),
 			}
 		}
@@ -391,25 +396,22 @@ mod tests {
 	}
 
 	fn new_test_ext() -> sp_io::TestExternalities {
-		let t = frame_system::GenesisConfig::default()
-			.build_storage::<TestRuntime>()
-			.unwrap();
+		let t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 		sp_io::TestExternalities::new(t)
 	}
 
 	fn transaction(id: u64) -> RawTransaction {
-		RawTransaction {
-			id,
-			recipient: 1,
-			amount: 2,
-		}
+		RawTransaction { id, recipient: 1, amount: 2 }
 	}
 
 	#[test]
 	fn unfinalized_transaction_rejected() {
 		new_test_ext().execute_with(|| {
 			assert_noop!(
-				Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (false, transaction(0))),
+				Exchange::import_peer_transaction(
+					Origin::signed(SUBMITTER),
+					(false, transaction(0))
+				),
 				Error::<TestRuntime, ()>::UnfinalizedTransaction,
 			);
 		});
diff --git a/bridges/modules/dispatch/src/lib.rs b/bridges/modules/dispatch/src/lib.rs
index 698d3842a0cd1e926b3f77c4a8f998e0cda2a5d7..3039c6e31654d36c2a32172a908810320ff03bd2 100644
--- a/bridges/modules/dispatch/src/lib.rs
+++ b/bridges/modules/dispatch/src/lib.rs
@@ -60,7 +60,13 @@ pub mod pallet {
 		/// it comes from the messages module.
 		type BridgeMessageId: Parameter;
 		/// Type of account ID on source chain.
-		type SourceChainAccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default;
+		type SourceChainAccountId: Parameter
+			+ Member
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ MaybeDisplay
+			+ Ord
+			+ Default;
 		/// Type of account public key on target chain.
 		type TargetChainAccountPublic: Parameter + IdentifyAccount<AccountId = Self::AccountId>;
 		/// Type of signature that may prove that the message has been signed by
@@ -75,8 +81,8 @@ pub mod pallet {
 			>;
 		/// Pre-dispatch filter for incoming calls.
 		///
-		/// The pallet will filter all incoming calls right before they're dispatched. If this filter
-		/// rejects the call, special event (`Event::MessageCallRejected`) is emitted.
+		/// The pallet will filter all incoming calls right before they're dispatched. If this
+		/// filter rejects the call, special event (`Event::MessageCallRejected`) is emitted.
 		type CallFilter: Contains<<Self as Config<I>>::Call>;
 		/// The type that is used to wrap the `Self::Call` when it is moved over bridge.
 		///
@@ -136,8 +142,12 @@ pub mod pallet {
 }
 
 impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId> for Pallet<T, I> {
-	type Message =
-		MessagePayload<T::SourceChainAccountId, T::TargetChainAccountPublic, T::TargetChainSignature, T::EncodedCall>;
+	type Message = MessagePayload<
+		T::SourceChainAccountId,
+		T::TargetChainAccountPublic,
+		T::TargetChainSignature,
+		T::EncodedCall,
+	>;
 
 	fn dispatch_weight(message: &Self::Message) -> bp_message_dispatch::Weight {
 		message.weight
@@ -165,8 +175,8 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 					dispatch_result: false,
 					unspent_weight: 0,
 					dispatch_fee_paid_during_dispatch: false,
-				};
-			}
+				}
+			},
 		};
 
 		// verify spec version
@@ -191,7 +201,7 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 				expected_version,
 				message.spec_version,
 			));
-			return dispatch_result;
+			return dispatch_result
 		}
 
 		// now that we have spec version checked, let's decode the call
@@ -205,18 +215,19 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 					id,
 				);
 				Self::deposit_event(Event::MessageCallDecodeFailed(source_chain, id));
-				return dispatch_result;
-			}
+				return dispatch_result
+			},
 		};
 
 		// prepare dispatch origin
 		let origin_account = match message.origin {
 			CallOrigin::SourceRoot => {
-				let hex_id = derive_account_id::<T::SourceChainAccountId>(source_chain, SourceAccount::Root);
+				let hex_id =
+					derive_account_id::<T::SourceChainAccountId>(source_chain, SourceAccount::Root);
 				let target_id = T::AccountIdConverter::convert(hex_id);
 				log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id);
 				target_id
-			}
+			},
 			CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => {
 				let digest = account_ownership_digest(
 					&call,
@@ -237,18 +248,19 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 						target_signature,
 					);
 					Self::deposit_event(Event::MessageSignatureMismatch(source_chain, id));
-					return dispatch_result;
+					return dispatch_result
 				}
 
 				log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account);
 				target_account
-			}
+			},
 			CallOrigin::SourceAccount(source_account_id) => {
-				let hex_id = derive_account_id(source_chain, SourceAccount::Account(source_account_id));
+				let hex_id =
+					derive_account_id(source_chain, SourceAccount::Account(source_account_id));
 				let target_id = T::AccountIdConverter::convert(hex_id);
 				log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id);
 				target_id
-			}
+			},
 		};
 
 		// filter the call
@@ -261,7 +273,7 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 				call,
 			);
 			Self::deposit_event(Event::MessageCallRejected(source_chain, id));
-			return dispatch_result;
+			return dispatch_result
 		}
 
 		// verify weight
@@ -284,12 +296,15 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 				expected_weight,
 				message.weight,
 			));
-			return dispatch_result;
+			return dispatch_result
 		}
 
 		// pay dispatch fee right before dispatch
-		let pay_dispatch_fee_at_target_chain = message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
-		if pay_dispatch_fee_at_target_chain && pay_dispatch_fee(&origin_account, message.weight).is_err() {
+		let pay_dispatch_fee_at_target_chain =
+			message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
+		if pay_dispatch_fee_at_target_chain &&
+			pay_dispatch_fee(&origin_account, message.weight).is_err()
+		{
 			log::trace!(
 				target: "runtime::bridge-dispatch",
 				"Failed to pay dispatch fee for dispatching message {:?}/{:?} with weight {}",
@@ -303,7 +318,7 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 				origin_account,
 				message.weight,
 			));
-			return dispatch_result;
+			return dispatch_result
 		}
 		dispatch_result.dispatch_fee_paid_during_dispatch = pay_dispatch_fee_at_target_chain;
 
@@ -343,9 +358,19 @@ impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId>
 /// For example, if a message is sent from a "regular" account on the source chain it will not be
 /// allowed to be dispatched as Root on the target chain. This is a useful check to do on the source
 /// chain _before_ sending a message whose dispatch will be rejected on the target chain.
-pub fn verify_message_origin<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature, Call>(
+pub fn verify_message_origin<
+	SourceChainAccountId,
+	TargetChainAccountPublic,
+	TargetChainSignature,
+	Call,
+>(
 	sender_origin: &RawOrigin<SourceChainAccountId>,
-	message: &MessagePayload<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature, Call>,
+	message: &MessagePayload<
+		SourceChainAccountId,
+		TargetChainAccountPublic,
+		TargetChainSignature,
+		Call,
+	>,
 ) -> Result<Option<SourceChainAccountId>, BadOrigin>
 where
 	SourceChainAccountId: PartialEq + Clone,
@@ -354,21 +379,19 @@ where
 		CallOrigin::SourceRoot => {
 			ensure!(sender_origin == &RawOrigin::Root, BadOrigin);
 			Ok(None)
-		}
+		},
 		CallOrigin::TargetAccount(ref source_account_id, _, _) => {
-			ensure!(
-				sender_origin == &RawOrigin::Signed(source_account_id.clone()),
-				BadOrigin
-			);
+			ensure!(sender_origin == &RawOrigin::Signed(source_account_id.clone()), BadOrigin);
 			Ok(Some(source_account_id.clone()))
-		}
+		},
 		CallOrigin::SourceAccount(ref source_account_id) => {
 			ensure!(
-				sender_origin == &RawOrigin::Signed(source_account_id.clone()) || sender_origin == &RawOrigin::Root,
+				sender_origin == &RawOrigin::Signed(source_account_id.clone()) ||
+					sender_origin == &RawOrigin::Root,
 				BadOrigin
 			);
 			Ok(Some(source_account_id.clone()))
-		}
+		},
 	}
 }
 
@@ -533,16 +556,17 @@ mod tests {
 	const TEST_WEIGHT: Weight = 1_000_000_000;
 
 	fn new_test_ext() -> sp_io::TestExternalities {
-		let t = frame_system::GenesisConfig::default()
-			.build_storage::<TestRuntime>()
-			.unwrap();
+		let t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 		sp_io::TestExternalities::new(t)
 	}
 
 	fn prepare_message(
 		origin: CallOrigin<AccountId, TestAccountPublic, TestSignature>,
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::BridgeMessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		MessagePayload {
 			spec_version: TEST_SPEC_VERSION,
 			weight: TEST_WEIGHT,
@@ -554,20 +578,29 @@ mod tests {
 
 	fn prepare_root_message(
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::BridgeMessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		prepare_message(CallOrigin::SourceRoot, call)
 	}
 
 	fn prepare_target_message(
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::BridgeMessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1));
 		prepare_message(origin, call)
 	}
 
 	fn prepare_source_message(
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::BridgeMessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		let origin = CallOrigin::SourceAccount(1);
 		prepare_message(origin, call)
 	}
@@ -578,13 +611,20 @@ mod tests {
 			let id = [0; 4];
 
 			const BAD_SPEC_VERSION: SpecVersion = 99;
-			let mut message =
-				prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
+			let mut message = prepare_root_message(Call::System(
+				<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3]),
+			));
 			let weight = message.weight;
 			message.spec_version = BAD_SPEC_VERSION;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -592,12 +632,14 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageVersionSpecMismatch(
-						SOURCE_CHAIN_ID,
-						id,
-						TEST_SPEC_VERSION,
-						BAD_SPEC_VERSION
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageVersionSpecMismatch(
+							SOURCE_CHAIN_ID,
+							id,
+							TEST_SPEC_VERSION,
+							BAD_SPEC_VERSION
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -612,13 +654,16 @@ mod tests {
 			let call_weight = call.get_dispatch_info().weight;
 			let mut message = prepare_root_message(call);
 			message.weight = 7;
-			assert!(
-				call_weight != 7,
-				"needed for test to actually trigger a weight mismatch"
-			);
+			assert!(call_weight != 7, "needed for test to actually trigger a weight mismatch");
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, 7);
 			assert!(!result.dispatch_result);
 
@@ -626,12 +671,14 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageWeightMismatch(
-						SOURCE_CHAIN_ID,
-						id,
-						call_weight,
-						7,
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageWeightMismatch(
+							SOURCE_CHAIN_ID,
+							id,
+							call_weight,
+							7,
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -651,7 +698,13 @@ mod tests {
 			let weight = message.weight;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -659,10 +712,12 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageSignatureMismatch(
-						SOURCE_CHAIN_ID,
-						id
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageSignatureMismatch(
+							SOURCE_CHAIN_ID,
+							id
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -675,7 +730,13 @@ mod tests {
 			let id = [0; 4];
 
 			System::set_block_number(1);
-			Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Err(()), |_, _| unreachable!());
+			Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Err(()),
+				|_, _| unreachable!(),
+			);
 
 			assert_eq!(
 				System::events(),
@@ -696,13 +757,20 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let mut message =
-				prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
+			let mut message = prepare_root_message(Call::System(
+				<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3]),
+			));
 			let weight = message.weight;
 			message.call.0 = vec![];
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -710,10 +778,12 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallDecodeFailed(
-						SOURCE_CHAIN_ID,
-						id
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageCallDecodeFailed(
+							SOURCE_CHAIN_ID,
+							id
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -725,13 +795,21 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let call = Call::System(<frame_system::Call<TestRuntime>>::fill_block(Perbill::from_percent(75)));
+			let call = Call::System(<frame_system::Call<TestRuntime>>::fill_block(
+				Perbill::from_percent(75),
+			));
 			let weight = call.get_dispatch_info().weight;
 			let mut message = prepare_root_message(call);
 			message.weight = weight;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -739,10 +817,12 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallRejected(
-						SOURCE_CHAIN_ID,
-						id
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageCallRejected(
+							SOURCE_CHAIN_ID,
+							id
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -754,13 +834,17 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let mut message =
-				prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
+			let mut message = prepare_root_message(Call::System(
+				<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3]),
+			));
 			let weight = message.weight;
 			message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Err(()));
+			let result =
+				Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| {
+					Err(())
+				});
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -768,15 +852,17 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatchPaymentFailed(
-						SOURCE_CHAIN_ID,
-						id,
-						AccountIdConverter::convert(derive_account_id::<AccountId>(
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageDispatchPaymentFailed(
 							SOURCE_CHAIN_ID,
-							SourceAccount::Root
-						)),
-						TEST_WEIGHT,
-					)),
+							id,
+							AccountIdConverter::convert(derive_account_id::<AccountId>(
+								SOURCE_CHAIN_ID,
+								SourceAccount::Root
+							)),
+							TEST_WEIGHT,
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -788,12 +874,19 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let mut message =
-				prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
+			let mut message = prepare_root_message(Call::System(
+				<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3]),
+			));
 			message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Ok(()));
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| Ok(()),
+			);
 			assert!(result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -821,7 +914,13 @@ mod tests {
 			let message = prepare_target_message(call);
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(!result.dispatch_result);
 
@@ -844,10 +943,18 @@ mod tests {
 	fn should_dispatch_bridge_message_from_root_origin() {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
-			let message = prepare_root_message(Call::System(<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3])));
+			let message = prepare_root_message(Call::System(
+				<frame_system::Call<TestRuntime>>::remark(vec![1, 2, 3]),
+			));
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -875,7 +982,13 @@ mod tests {
 			let message = prepare_target_message(call);
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -903,7 +1016,13 @@ mod tests {
 			let message = prepare_source_message(call);
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -931,10 +1050,7 @@ mod tests {
 		assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(None)));
 
 		// when message is sent by some real account, CallOrigin::SourceRoot is not allowed
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(1), &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Err(BadOrigin)));
 	}
 
 	#[test]
@@ -943,23 +1059,14 @@ mod tests {
 		let message = prepare_target_message(call);
 
 		// When message is sent by Root, CallOrigin::TargetAccount is not allowed
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Root, &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Err(BadOrigin)));
 
 		// When message is sent by some other account, it is rejected
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(2), &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin)));
 
 		// When message is sent by a real account, it is allowed to have origin
 		// CallOrigin::TargetAccount
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(1), &message),
-			Ok(Some(1))
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1))));
 	}
 
 	#[test]
@@ -968,16 +1075,10 @@ mod tests {
 		let message = prepare_source_message(call);
 
 		// Sending a message from the expected origin account works
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(1), &message),
-			Ok(Some(1))
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1))));
 
 		// If we send a message from a different account, it is rejected
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(2), &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin)));
 
 		// The Root account is allowed to assume any expected origin account
 		assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(Some(1))));
diff --git a/bridges/modules/ethereum-contract-builtin/src/lib.rs b/bridges/modules/ethereum-contract-builtin/src/lib.rs
index 75004fd2a92f6d8d5ae021869154bd9ede7858e3..4a830f8e0a389f916455695e59d2ec9c44b3b081 100644
--- a/bridges/modules/ethereum-contract-builtin/src/lib.rs
+++ b/bridges/modules/ethereum-contract-builtin/src/lib.rs
@@ -135,7 +135,9 @@ pub fn verify_substrate_finality_proof(
 ) -> Result<(), Error> {
 	let best_set = AuthorityList::decode(&mut &*raw_best_set)
 		.map_err(Error::BestSetDecode)
-		.and_then(|authorities| VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet));
+		.and_then(|authorities| {
+			VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet)
+		});
 
 	log::debug!(
 		target: "bridge-builtin",
@@ -150,15 +152,16 @@ pub fn verify_substrate_finality_proof(
 
 	let best_set = best_set?;
 
-	let verify_result = sc_finality_grandpa::GrandpaJustification::<Block>::decode_and_verify_finalizes(
-		raw_finality_proof,
-		(finality_target_hash, finality_target_number),
-		best_set_id,
-		&best_set,
-	)
-	.map_err(Box::new)
-	.map_err(Error::JustificationVerify)
-	.map(|_| ());
+	let verify_result =
+		sc_finality_grandpa::GrandpaJustification::<Block>::decode_and_verify_finalizes(
+			raw_finality_proof,
+			(finality_target_hash, finality_target_number),
+			best_set_id,
+			&best_set,
+		)
+		.map_err(Box::new)
+		.map_err(Error::JustificationVerify)
+		.map(|_| ());
 
 	log::debug!(
 		target: "bridge-builtin",
@@ -202,10 +205,7 @@ mod tests {
 	#[test]
 	fn from_substrate_block_number_succeeds() {
 		assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero());
-		assert_eq!(
-			from_substrate_block_number(std::u32::MAX).unwrap(),
-			U256::from(std::u32::MAX)
-		);
+		assert_eq!(from_substrate_block_number(std::u32::MAX).unwrap(), U256::from(std::u32::MAX));
 	}
 
 	#[test]
@@ -285,10 +285,7 @@ mod tests {
 					.parse()
 					.unwrap(),
 				number: 8,
-				signal: Some(ValidatorsSetSignal {
-					delay: 8,
-					validators: authorities.encode(),
-				}),
+				signal: Some(ValidatorsSetSignal { delay: 8, validators: authorities.encode() }),
 			},
 		);
 	}
@@ -296,13 +293,14 @@ mod tests {
 	/// Number of the example block with justification.
 	const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8;
 	/// Hash of the example block with justification.
-	const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775";
-	/// Id of authorities set that have generated example justification. Could be computed by tracking
-	/// every set change in canonized headers.
+	const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str =
+		"a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775";
+	/// Id of authorities set that have generated example justification. Could be computed by
+	/// tracking every set change in canonized headers.
 	const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0;
-	/// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange`
-	/// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at
-	/// appropriate block.
+	/// Encoded authorities set that has generated example justification. Could be fetched from
+	/// `ScheduledChange` digest of the block that has scheduled this set OR by calling
+	/// `GrandpaApi::grandpa_authorities()` at appropriate block.
 	const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000";
 	/// Example justification. Could be fetched by calling 'chain_getBlock' RPC.
 	const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900";
diff --git a/bridges/modules/ethereum/src/benchmarking.rs b/bridges/modules/ethereum/src/benchmarking.rs
index 98dbd04e2a829e04274a7b48147bc8c88b8ec30b..268795d8a83362578b7170b2675127c33da59106 100644
--- a/bridges/modules/ethereum/src/benchmarking.rs
+++ b/bridges/modules/ethereum/src/benchmarking.rs
@@ -17,8 +17,8 @@
 use super::*;
 
 use crate::test_utils::{
-	build_custom_header, build_genesis_header, insert_header, validator_utils::*, validators_change_receipt,
-	HeaderBuilder,
+	build_custom_header, build_genesis_header, insert_header, validator_utils::*,
+	validators_change_receipt, HeaderBuilder,
 };
 
 use bp_eth_poa::{compute_merkle_root, U256};
diff --git a/bridges/modules/ethereum/src/error.rs b/bridges/modules/ethereum/src/error.rs
index ad798379da7dcc827fdb31bb8e6e4dae57ee7307..6fd376b01715f81c8e19e886844417b7683530ef 100644
--- a/bridges/modules/ethereum/src/error.rs
+++ b/bridges/modules/ethereum/src/error.rs
@@ -85,7 +85,8 @@ impl Error {
 			Error::InsufficientProof => "Header has insufficient proof",
 			Error::InvalidDifficulty => "Header has invalid difficulty",
 			Error::NotValidator => "Header is sealed by unexpected validator",
-			Error::MissingTransactionsReceipts => "The import operation requires transactions receipts",
+			Error::MissingTransactionsReceipts =>
+				"The import operation requires transactions receipts",
 			Error::RedundantTransactionsReceipts => "Redundant transactions receipts are provided",
 			Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided",
 			Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future",
diff --git a/bridges/modules/ethereum/src/finality.rs b/bridges/modules/ethereum/src/finality.rs
index 934db0430ab8c4a62f3985dfc7d00fd2d35ddf81..34b766f7e0219b16ed9b723e092dd4555480ef49 100644
--- a/bridges/modules/ethereum/src/finality.rs
+++ b/bridges/modules/ethereum/src/finality.rs
@@ -14,18 +14,19 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::Storage;
+use crate::{error::Error, Storage};
 use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256};
 use codec::{Decode, Encode};
 use sp_io::crypto::secp256k1_ecdsa_recover;
 use sp_runtime::RuntimeDebug;
-use sp_std::collections::{
-	btree_map::{BTreeMap, Entry},
-	btree_set::BTreeSet,
-	vec_deque::VecDeque,
+use sp_std::{
+	collections::{
+		btree_map::{BTreeMap, Entry},
+		btree_set::BTreeSet,
+		vec_deque::VecDeque,
+	},
+	prelude::*,
 };
-use sp_std::prelude::*;
 
 /// Cached finality votes for given block.
 #[derive(RuntimeDebug)]
@@ -116,17 +117,14 @@ pub fn finalize_blocks<S: Storage>(
 			&current_votes,
 			ancestor.id.number >= two_thirds_majority_transition,
 		) {
-			break;
+			break
 		}
 
 		remove_signers_votes(&ancestor.signers, &mut current_votes);
 		finalized_headers.push((ancestor.id, ancestor.submitter.clone()));
 	}
 
-	Ok(FinalityEffects {
-		finalized_headers,
-		votes,
-	})
+	Ok(FinalityEffects { finalized_headers, votes })
 }
 
 /// Returns true if there are enough votes to treat this header as finalized.
@@ -135,8 +133,8 @@ fn is_finalized(
 	votes: &BTreeMap<Address, u64>,
 	requires_two_thirds_majority: bool,
 ) -> bool {
-	(!requires_two_thirds_majority && votes.len() * 2 > validators.len())
-		|| (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2)
+	(!requires_two_thirds_majority && votes.len() * 2 > validators.len()) ||
+		(requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2)
 }
 
 /// Prepare 'votes' of header and its ancestors' signers.
@@ -151,12 +149,12 @@ pub(crate) fn prepare_votes<Submitter>(
 	// if we have reached finalized block sibling, then we're trying
 	// to switch finalized blocks
 	if cached_votes.stopped_at_finalized_sibling {
-		return Err(Error::TryingToFinalizeSibling);
+		return Err(Error::TryingToFinalizeSibling)
 	}
 
 	// this fn can only work with single validators set
 	if !validators.contains(&header.author) {
-		return Err(Error::NotValidator);
+		return Err(Error::NotValidator)
 	}
 
 	// now we have votes that were valid when some block B has been inserted
@@ -171,7 +169,7 @@ pub(crate) fn prepare_votes<Submitter>(
 	while let Some(old_ancestor) = votes.ancestry.pop_front() {
 		if old_ancestor.id.number > best_finalized.number {
 			votes.ancestry.push_front(old_ancestor);
-			break;
+			break
 		}
 
 		remove_signers_votes(&old_ancestor.signers, &mut votes.votes);
@@ -180,7 +178,9 @@ pub(crate) fn prepare_votes<Submitter>(
 	// add votes from new blocks
 	let mut parent_empty_step_signers = empty_steps_signers(header);
 	let mut unaccounted_ancestry = VecDeque::new();
-	while let Some((ancestor_id, ancestor_submitter, ancestor)) = cached_votes.unaccounted_ancestry.pop_front() {
+	while let Some((ancestor_id, ancestor_submitter, ancestor)) =
+		cached_votes.unaccounted_ancestry.pop_front()
+	{
 		let mut signers = empty_steps_signers(&ancestor);
 		sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers);
 		signers.insert(ancestor.author);
@@ -199,11 +199,9 @@ pub(crate) fn prepare_votes<Submitter>(
 	let mut header_signers = BTreeSet::new();
 	header_signers.insert(header.author);
 	*votes.votes.entry(header.author).or_insert(0) += 1;
-	votes.ancestry.push_back(FinalityAncestor {
-		id,
-		submitter,
-		signers: header_signers,
-	});
+	votes
+		.ancestry
+		.push_back(FinalityAncestor { id, submitter, signers: header_signers });
 
 	Ok(votes)
 }
@@ -217,7 +215,7 @@ fn add_signers_votes(
 ) -> Result<(), Error> {
 	for signer in signers_to_add {
 		if !validators.contains(signer) {
-			return Err(Error::NotValidator);
+			return Err(Error::NotValidator)
 		}
 
 		*votes.entry(*signer).or_insert(0) += 1;
@@ -230,13 +228,12 @@ fn add_signers_votes(
 fn remove_signers_votes(signers_to_remove: &BTreeSet<Address>, votes: &mut BTreeMap<Address, u64>) {
 	for signer in signers_to_remove {
 		match votes.entry(*signer) {
-			Entry::Occupied(mut entry) => {
+			Entry::Occupied(mut entry) =>
 				if *entry.get() <= 1 {
 					entry.remove();
 				} else {
 					*entry.get_mut() -= 1;
-				}
-			}
+				},
 			Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"),
 		}
 	}
@@ -272,18 +269,19 @@ impl<Submitter> Default for CachedFinalityVotes<Submitter> {
 
 impl<Submitter> Default for FinalityVotes<Submitter> {
 	fn default() -> Self {
-		FinalityVotes {
-			votes: BTreeMap::new(),
-			ancestry: VecDeque::new(),
-		}
+		FinalityVotes { votes: BTreeMap::new(), ancestry: VecDeque::new() }
 	}
 }
 
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime};
-	use crate::{BridgeStorage, FinalityCache, HeaderToImport};
+	use crate::{
+		mock::{
+			insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime,
+		},
+		BridgeStorage, FinalityCache, HeaderToImport,
+	};
 
 	const TOTAL_VALIDATORS: usize = 5;
 
@@ -341,7 +339,8 @@ mod tests {
 			storage.insert_header(header_to_import.clone());
 
 			// when header#2 is inserted, nothing is finalized (2 votes)
-			header_to_import.header = HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1));
+			header_to_import.header =
+				HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1));
 			header_to_import.id = header_to_import.header.compute_id();
 			let id2 = header_to_import.header.compute_id();
 			assert_eq!(
@@ -360,7 +359,8 @@ mod tests {
 			storage.insert_header(header_to_import.clone());
 
 			// when header#3 is inserted, header#1 is finalized (3 votes)
-			header_to_import.header = HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2));
+			header_to_import.header =
+				HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2));
 			header_to_import.id = header_to_import.header.compute_id();
 			let id3 = header_to_import.header.compute_id();
 			assert_eq!(
@@ -390,7 +390,9 @@ mod tests {
 		// 2) add votes from header#4 and header#5
 		let validators = validators_addresses(5);
 		let headers = (1..6)
-			.map(|number| HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1)))
+			.map(|number| {
+				HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1))
+			})
 			.collect::<Vec<_>>();
 		let ancestry = headers
 			.iter()
@@ -405,9 +407,10 @@ mod tests {
 			prepare_votes::<()>(
 				CachedFinalityVotes {
 					stopped_at_finalized_sibling: false,
-					unaccounted_ancestry: vec![(headers[3].compute_id(), None, headers[3].clone()),]
-						.into_iter()
-						.collect(),
+					unaccounted_ancestry:
+						vec![(headers[3].compute_id(), None, headers[3].clone()),]
+							.into_iter()
+							.collect(),
 					votes: Some(FinalityVotes {
 						votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),]
 							.into_iter()
@@ -445,7 +448,8 @@ mod tests {
 			let mut ancestry = Vec::new();
 			let mut parent_hash = ctx.genesis.compute_hash();
 			for i in 1..10 {
-				let header = HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3));
+				let header =
+					HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3));
 				let id = header.compute_id();
 				insert_header(&mut storage, header.clone());
 				hashes.push(id.hash);
@@ -539,10 +543,7 @@ mod tests {
 	fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() {
 		assert_eq!(
 			prepare_votes::<()>(
-				CachedFinalityVotes {
-					stopped_at_finalized_sibling: true,
-					..Default::default()
-				},
+				CachedFinalityVotes { stopped_at_finalized_sibling: true, ..Default::default() },
 				Default::default(),
 				&validators_addresses(3).iter().collect(),
 				Default::default(),
diff --git a/bridges/modules/ethereum/src/import.rs b/bridges/modules/ethereum/src/import.rs
index 9f93d8dcb2eb4d9e5c7e60f9dbd46189b9761357..3c99451ca8f180be951aa5b7b0c065dcfd568314 100644
--- a/bridges/modules/ethereum/src/import.rs
+++ b/bridges/modules/ethereum/src/import.rs
@@ -14,11 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::finality::finalize_blocks;
-use crate::validators::{Validators, ValidatorsConfiguration};
-use crate::verification::{is_importable_header, verify_aura_header};
-use crate::{AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage};
+use crate::{
+	error::Error,
+	finality::finalize_blocks,
+	validators::{Validators, ValidatorsConfiguration},
+	verification::{is_importable_header, verify_aura_header},
+	AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage,
+};
 use bp_eth_poa::{AuraHeader, HeaderId, Receipt};
 use sp_std::{collections::btree_map::BTreeMap, prelude::*};
 
@@ -65,7 +67,7 @@ pub fn import_headers<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 					}
 				}
 				useful += 1;
-			}
+			},
 			Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1,
 			Err(error) => return Err(error),
 		}
@@ -103,7 +105,8 @@ pub fn import_header<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 
 	// check if block schedules new validators
 	let validators = Validators::new(validators_config);
-	let (scheduled_change, enacted_change) = validators.extract_validators_change(&header, receipts)?;
+	let (scheduled_change, enacted_change) =
+		validators.extract_validators_change(&header, receipts)?;
 
 	// check if block finalizes some other blocks and corresponding scheduled validators
 	let validators_set = import_context.validators_set();
@@ -117,11 +120,10 @@ pub fn import_header<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 		aura_config.two_thirds_majority_transition,
 	)?;
 	let enacted_change = enacted_change
-		.map(|validators| ChangeToEnact {
-			signal_block: None,
-			validators,
-		})
-		.or_else(|| validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers));
+		.map(|validators| ChangeToEnact { signal_block: None, validators })
+		.or_else(|| {
+			validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers)
+		});
 
 	// NOTE: we can't return Err() from anywhere below this line
 	// (because otherwise we'll have inconsistent storage if transaction will fail)
@@ -145,9 +147,7 @@ pub fn import_header<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 	let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id);
 	let pruning_upper_bound = pruning_strategy.pruning_upper_bound(
 		new_best_block_id.number,
-		new_best_finalized_block_id
-			.map(|id| id.number)
-			.unwrap_or(finalized_id.number),
+		new_best_finalized_block_id.map(|id| id.number).unwrap_or(finalized_id.number),
 	);
 
 	// now mark finalized headers && prune old headers
@@ -171,12 +171,15 @@ pub fn header_import_requires_receipts<S: Storage>(
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{
-		run_test, secret_to_address, test_aura_config, test_validators_config, validator, validators_addresses,
-		validators_change_receipt, HeaderBuilder, KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT,
+	use crate::{
+		mock::{
+			run_test, secret_to_address, test_aura_config, test_validators_config, validator,
+			validators_addresses, validators_change_receipt, HeaderBuilder,
+			KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT,
+		},
+		validators::ValidatorsSource,
+		BlocksToPrune, BridgeStorage, Headers, PruningRange,
 	};
-	use crate::validators::ValidatorsSource;
-	use crate::{BlocksToPrune, BridgeStorage, Headers, PruningRange};
 	use secp256k1::SecretKey;
 
 	const TOTAL_VALIDATORS: usize = 3;
@@ -186,10 +189,7 @@ mod tests {
 		run_test(TOTAL_VALIDATORS, |_| {
 			let mut storage = BridgeStorage::<TestRuntime>::new();
 			storage.finalize_and_prune_headers(
-				Some(HeaderId {
-					number: 100,
-					..Default::default()
-				}),
+				Some(HeaderId { number: 100, ..Default::default() }),
 				0,
 			);
 			assert_eq!(
@@ -281,8 +281,10 @@ mod tests {
 	#[test]
 	fn headers_are_pruned_during_import() {
 		run_test(TOTAL_VALIDATORS, |ctx| {
-			let validators_config =
-				ValidatorsConfiguration::Single(ValidatorsSource::Contract([3; 20].into(), ctx.addresses.clone()));
+			let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+				[3; 20].into(),
+				ctx.addresses.clone(),
+			));
 			let validators = vec![validator(0), validator(1), validator(2)];
 			let mut storage = BridgeStorage::<TestRuntime>::new();
 
@@ -305,7 +307,8 @@ mod tests {
 				)
 				.unwrap();
 				match i {
-					2..=10 => assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,),
+					2..=10 =>
+						assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,),
 					_ => assert_eq!(finalized_blocks, vec![], "At {}", i),
 				}
 				latest_block_id = rolling_last_block_id;
@@ -339,8 +342,8 @@ mod tests {
 			latest_block_id = rolling_last_block_id;
 
 			// and now let's say validators 1 && 2 went offline
-			// => in the range 12-25 no blocks are finalized, but we still continue to prune old headers
-			// until header#11 is met. we can't prune #11, because it schedules change
+			// => in the range 12-25 no blocks are finalized, but we still continue to prune old
+			// headers until header#11 is met. we can't prune #11, because it schedules change
 			let mut step = 56u64;
 			let mut expected_blocks = vec![(header11.compute_id(), Some(101))];
 			for i in 12..25 {
@@ -366,10 +369,7 @@ mod tests {
 			}
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 11,
-					oldest_block_to_keep: 14,
-				},
+				PruningRange { oldest_unpruned_block: 11, oldest_block_to_keep: 14 },
 			);
 
 			// now let's insert block signed by validator 1
@@ -393,10 +393,7 @@ mod tests {
 			assert_eq!(finalized_blocks, expected_blocks);
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 15,
-					oldest_block_to_keep: 15,
-				},
+				PruningRange { oldest_unpruned_block: 15, oldest_block_to_keep: 15 },
 			);
 		});
 	}
@@ -483,9 +480,7 @@ mod tests {
 			let header1 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(0)
-					.step(2)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(0).step(2).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header1);
@@ -495,9 +490,7 @@ mod tests {
 			let header2 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(1)
-					.step(3)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(1).step(3).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header2);
@@ -507,9 +500,7 @@ mod tests {
 			let header3 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(2)
-					.step(4)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(2).step(4).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header3);
@@ -552,19 +543,19 @@ mod tests {
 			assert_eq!(storage.best_block().0, header5_1);
 			assert_eq!(storage.finalized_block(), header1);
 
-			// when we import header4 { parent = header3 }, authored by validator[0], header2 is finalized
+			// when we import header4 { parent = header3 }, authored by validator[0], header2 is
+			// finalized
 			let header4 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(3)
-					.step(5)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(3).step(5).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header5_1);
 			assert_eq!(storage.finalized_block(), header2);
 
-			// when we import header5 { parent = header4 }, authored by validator[1], header3 is finalized
+			// when we import header5 { parent = header4 }, authored by validator[1], header3 is
+			// finalized
 			let header5 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
@@ -576,7 +567,8 @@ mod tests {
 			assert_eq!(storage.best_block().0, header5);
 			assert_eq!(storage.finalized_block(), header3);
 
-			// import of header2'' { parent = header1 } fails, because it has number < best_finalized
+			// import of header2'' { parent = header1 } fails, because it has number <
+			// best_finalized
 			assert_eq!(
 				import_custom_block(
 					&mut storage,
diff --git a/bridges/modules/ethereum/src/lib.rs b/bridges/modules/ethereum/src/lib.rs
index 88a35f365e40bbd4256d6d38ec29ca0c689ba87b..6234a02cca13d4365e9439a75a3452cbcc0b4c4a 100644
--- a/bridges/modules/ethereum/src/lib.rs
+++ b/bridges/modules/ethereum/src/lib.rs
@@ -19,7 +19,9 @@
 #![allow(clippy::large_enum_variant)]
 
 use crate::finality::{CachedFinalityVotes, FinalityVotes};
-use bp_eth_poa::{Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256};
+use bp_eth_poa::{
+	Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256,
+};
 use codec::{Decode, Encode};
 use frame_support::traits::Get;
 use sp_runtime::RuntimeDebug;
@@ -222,10 +224,7 @@ impl<Submitter> ImportContext<Submitter> {
 	/// This may point to parent if parent has signaled change.
 	pub fn last_signal_block(&self) -> Option<HeaderId> {
 		match self.parent_scheduled_change {
-			Some(_) => Some(HeaderId {
-				number: self.parent_header.number,
-				hash: self.parent_hash,
-			}),
+			Some(_) => Some(HeaderId { number: self.parent_header.number, hash: self.parent_hash }),
 			None => self.last_signal_block,
 		}
 	}
@@ -313,8 +312,8 @@ pub trait PruningStrategy: Default {
 	/// number greater than or equal to N even if strategy allows that.
 	///
 	/// If your strategy allows pruning unfinalized blocks, this could lead to switch
-	/// between finalized forks (only if authorities are misbehaving). But since 50 percent plus one (or 2/3)
-	/// authorities are able to do whatever they want with the chain, this isn't considered
+	/// between finalized forks (only if authorities are misbehaving). But since 50 percent plus one
+	/// (or 2/3) authorities are able to do whatever they want with the chain, this isn't considered
 	/// fatal. If your strategy only prunes finalized blocks, we'll never be able to finalize
 	/// header that isn't descendant of current best finalized block.
 	fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64;
@@ -343,10 +342,10 @@ impl ChainTime for () {
 pub trait OnHeadersSubmitted<AccountId> {
 	/// Called when valid headers have been submitted.
 	///
-	/// The submitter **must not** be rewarded for submitting valid headers, because greedy authority
-	/// could produce and submit multiple valid headers (without relaying them to other peers) and
-	/// get rewarded. Instead, the provider could track submitters and stop rewarding if too many
-	/// headers have been submitted without finalization.
+	/// The submitter **must not** be rewarded for submitting valid headers, because greedy
+	/// authority could produce and submit multiple valid headers (without relaying them to other
+	/// peers) and get rewarded. Instead, the provider could track submitters and stop rewarding if
+	/// too many headers have been submitted without finalization.
 	fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64);
 	/// Called when invalid headers have been submitted.
 	fn on_invalid_headers_submitted(submitter: AccountId);
@@ -459,13 +458,14 @@ pub mod pallet {
 
 			// now track/penalize current submitter for providing new headers
 			match import_result {
-				Ok((useful, useless)) => T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless),
+				Ok((useful, useless)) =>
+					T::OnHeadersSubmitted::on_valid_headers_submitted(submitter, useful, useless),
 				Err(error) => {
 					// even though we may have accept some headers, we do not want to reward someone
 					// who provides invalid headers
 					T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter);
-					return Err(error.msg().into());
-				}
+					return Err(error.msg().into())
+				},
 			}
 
 			Ok(())
@@ -500,12 +500,13 @@ pub mod pallet {
 						// UnsignedTooFarInTheFuture is the special error code used to limit
 						// number of transactions in the pool - we do not want to ban transaction
 						// in this case (see verification.rs for details)
-						Err(error::Error::UnsignedTooFarInTheFuture) => {
-							UnknownTransaction::Custom(error::Error::UnsignedTooFarInTheFuture.code()).into()
-						}
+						Err(error::Error::UnsignedTooFarInTheFuture) => UnknownTransaction::Custom(
+							error::Error::UnsignedTooFarInTheFuture.code(),
+						)
+						.into(),
 						Err(error) => InvalidTransaction::Custom(error.code()).into(),
 					}
-				}
+				},
 				_ => InvalidTransaction::Call.into(),
 			}
 		}
@@ -513,23 +514,28 @@ pub mod pallet {
 
 	/// Best known block.
 	#[pallet::storage]
-	pub(super) type BestBlock<T: Config<I>, I: 'static = ()> = StorageValue<_, (HeaderId, U256), ValueQuery>;
+	pub(super) type BestBlock<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, (HeaderId, U256), ValueQuery>;
 
 	/// Best finalized block.
 	#[pallet::storage]
-	pub(super) type FinalizedBlock<T: Config<I>, I: 'static = ()> = StorageValue<_, HeaderId, ValueQuery>;
+	pub(super) type FinalizedBlock<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, HeaderId, ValueQuery>;
 
 	/// Range of blocks that we want to prune.
 	#[pallet::storage]
-	pub(super) type BlocksToPrune<T: Config<I>, I: 'static = ()> = StorageValue<_, PruningRange, ValueQuery>;
+	pub(super) type BlocksToPrune<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, PruningRange, ValueQuery>;
 
 	/// Map of imported headers by hash.
 	#[pallet::storage]
-	pub(super) type Headers<T: Config<I>, I: 'static = ()> = StorageMap<_, Identity, H256, StoredHeader<T::AccountId>>;
+	pub(super) type Headers<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, H256, StoredHeader<T::AccountId>>;
 
 	/// Map of imported header hashes by number.
 	#[pallet::storage]
-	pub(super) type HeadersByNumber<T: Config<I>, I: 'static = ()> = StorageMap<_, Blake2_128Concat, u64, Vec<H256>>;
+	pub(super) type HeadersByNumber<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Blake2_128Concat, u64, Vec<H256>>;
 
 	/// Map of cached finality data by header hash.
 	#[pallet::storage]
@@ -538,17 +544,20 @@ pub mod pallet {
 
 	/// The ID of next validator set.
 	#[pallet::storage]
-	pub(super) type NextValidatorsSetId<T: Config<I>, I: 'static = ()> = StorageValue<_, u64, ValueQuery>;
+	pub(super) type NextValidatorsSetId<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, u64, ValueQuery>;
 
 	/// Map of validators sets by their id.
 	#[pallet::storage]
-	pub(super) type ValidatorsSets<T: Config<I>, I: 'static = ()> = StorageMap<_, Twox64Concat, u64, ValidatorsSet>;
+	pub(super) type ValidatorsSets<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Twox64Concat, u64, ValidatorsSet>;
 
 	/// Validators sets reference count. Each header that is authored by this set increases
 	/// the reference count. When we prune this header, we decrease the reference count.
 	/// When it reaches zero, we are free to prune validator set as well.
 	#[pallet::storage]
-	pub(super) type ValidatorsSetsRc<T: Config<I>, I: 'static = ()> = StorageMap<_, Twox64Concat, u64, u64>;
+	pub(super) type ValidatorsSetsRc<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Twox64Concat, u64, u64>;
 
 	/// Map of validators set changes scheduled by given header.
 	#[pallet::storage]
@@ -572,14 +581,16 @@ pub mod pallet {
 			// the initial blocks should be selected so that:
 			// 1) it doesn't signal validators changes;
 			// 2) there are no scheduled validators changes from previous blocks;
-			// 3) (implied) all direct children of initial block are authored by the same validators set.
+			// 3) (implied) all direct children of initial block are authored by the same validators
+			// set.
 
-			assert!(
-				!self.initial_validators.is_empty(),
-				"Initial validators set can't be empty",
-			);
+			assert!(!self.initial_validators.is_empty(), "Initial validators set can't be empty",);
 
-			initialize_storage::<T, I>(&self.initial_header, self.initial_difficulty, &self.initial_validators);
+			initialize_storage::<T, I>(
+				&self.initial_header,
+				self.initial_difficulty,
+				&self.initial_validators,
+			);
 		}
 	}
 }
@@ -648,7 +659,7 @@ impl<T: Config<I>, I: 'static> BridgeStorage<T, I> {
 		for number in begin..end {
 			// if we can't prune anything => break
 			if max_blocks_to_prune == 0 {
-				break;
+				break
 			}
 
 			// read hashes of blocks with given number and try to prune these blocks
@@ -664,7 +675,7 @@ impl<T: Config<I>, I: 'static> BridgeStorage<T, I> {
 				// if we haven't pruned all blocks, remember unpruned
 				if !blocks_at_number.is_empty() {
 					HeadersByNumber::<T, I>::insert(number, blocks_at_number);
-					break;
+					break
 				}
 			}
 
@@ -692,8 +703,10 @@ impl<T: Config<I>, I: 'static> BridgeStorage<T, I> {
 		blocks_at_number: &mut Vec<H256>,
 	) {
 		// ensure that unfinalized headers we want to prune do not have scheduled changes
-		if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::<T, I>::contains_key) {
-			return;
+		if number > finalized_number &&
+			blocks_at_number.iter().any(ScheduledChanges::<T, I>::contains_key)
+		{
+			return
 		}
 
 		// physically remove headers and (probably) obsolete validators sets
@@ -718,7 +731,7 @@ impl<T: Config<I>, I: 'static> BridgeStorage<T, I> {
 			// check if we have already pruned too much headers in this call
 			*max_blocks_to_prune -= 1;
 			if *max_blocks_to_prune == 0 {
-				return;
+				return
 			}
 		}
 	}
@@ -749,21 +762,22 @@ impl<T: Config<I>, I: 'static> Storage for BridgeStorage<T, I> {
 		let mut current_id = *parent;
 		loop {
 			// if we have reached finalized block's sibling => stop with special signal
-			if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash {
+			if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash
+			{
 				votes.stopped_at_finalized_sibling = true;
-				return votes;
+				return votes
 			}
 
 			// if we have reached target header => stop
 			if stop_at(&current_id.hash) {
-				return votes;
+				return votes
 			}
 
 			// if we have found cached votes => stop
 			let cached_votes = FinalityCache::<T, I>::get(&current_id.hash);
 			if let Some(cached_votes) = cached_votes {
 				votes.votes = Some(cached_votes);
-				return votes;
+				return votes
 			}
 
 			// read next parent header id
@@ -792,7 +806,9 @@ impl<T: Config<I>, I: 'static> Storage for BridgeStorage<T, I> {
 	) -> Option<ImportContext<Self::Submitter>> {
 		Headers::<T, I>::get(parent_hash).map(|parent_header| {
 			let validators_set = ValidatorsSets::<T, I>::get(parent_header.next_validators_set_id)
-				.expect("validators set is only pruned when last ref is pruned; there is a ref; qed");
+				.expect(
+					"validators set is only pruned when last ref is pruned; there is a ref; qed",
+				);
 			let parent_scheduled_change = ScheduledChanges::<T, I>::get(parent_hash);
 			ImportContext {
 				submitter,
@@ -841,19 +857,20 @@ impl<T: Config<I>, I: 'static> Storage for BridgeStorage<T, I> {
 				);
 				ValidatorsSetsRc::<T, I>::insert(next_validators_set_id, 1);
 				next_validators_set_id
-			}
+			},
 			None => {
 				ValidatorsSetsRc::<T, I>::mutate(header.context.validators_set_id, |rc| {
 					*rc = Some(rc.map(|rc| rc + 1).unwrap_or(1));
 					*rc
 				});
 				header.context.validators_set_id
-			}
+			},
 		};
 
 		let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get();
 		if let Some(finality_votes_caching_interval) = finality_votes_caching_interval {
-			let cache_entry_required = header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0;
+			let cache_entry_required =
+				header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0;
 			if cache_entry_required {
 				FinalityCache::<T, I>::insert(header.id.hash, header.finality_votes);
 			}
@@ -917,10 +934,7 @@ pub(crate) fn initialize_storage<T: Config<I>, I: 'static>(
 		initial_hash,
 	);
 
-	let initial_id = HeaderId {
-		number: initial_header.number,
-		hash: initial_hash,
-	};
+	let initial_id = HeaderId { number: initial_header.number, hash: initial_hash };
 	BestBlock::<T, I>::put((initial_id, initial_difficulty));
 	FinalizedBlock::<T, I>::put(initial_id);
 	BlocksToPrune::<T, I>::put(PruningRange {
@@ -965,7 +979,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			proof.len(),
 		);
 
-		return false;
+		return false
 	}
 
 	let header = match storage.header(&block) {
@@ -977,8 +991,8 @@ pub fn verify_transaction_finalized<S: Storage>(
 				block,
 			);
 
-			return false;
-		}
+			return false
+		},
 	};
 	let finalized = storage.finalized_block();
 
@@ -992,7 +1006,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			finalized.number,
 		);
 
-		return false;
+		return false
 	}
 
 	// check if header is actually finalized
@@ -1010,7 +1024,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			finalized.hash,
 		);
 
-		return false;
+		return false
 	}
 
 	// verify that transaction is included in the block
@@ -1022,7 +1036,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			computed_root,
 		);
 
-		return false;
+		return false
 	}
 
 	// verify that transaction receipt is included in the block
@@ -1034,7 +1048,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			computed_root,
 		);
 
-		return false;
+		return false
 	}
 
 	// check that transaction has completed successfully
@@ -1048,7 +1062,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			);
 
 			false
-		}
+		},
 		Err(err) => {
 			log::trace!(
 				target: "runtime",
@@ -1057,23 +1071,24 @@ pub fn verify_transaction_finalized<S: Storage>(
 			);
 
 			false
-		}
+		},
 	}
 }
 
 /// Transaction pool configuration.
 fn pool_configuration() -> PoolConfiguration {
-	PoolConfiguration {
-		max_future_number_difference: 10,
-	}
+	PoolConfiguration { max_future_number_difference: 10 }
 }
 
 /// Return iterator of given header ancestors.
-fn ancestry<S: Storage>(storage: &'_ S, mut parent_hash: H256) -> impl Iterator<Item = (H256, AuraHeader)> + '_ {
+fn ancestry<S: Storage>(
+	storage: &'_ S,
+	mut parent_hash: H256,
+) -> impl Iterator<Item = (H256, AuraHeader)> + '_ {
 	sp_std::iter::from_fn(move || {
 		let (header, _) = storage.header(&parent_hash)?;
 		if header.number == 0 {
-			return None;
+			return None
 		}
 
 		let hash = parent_hash;
@@ -1085,12 +1100,14 @@ fn ancestry<S: Storage>(storage: &'_ S, mut parent_hash: H256) -> impl Iterator<
 #[cfg(test)]
 pub(crate) mod tests {
 	use super::*;
-	use crate::finality::FinalityAncestor;
-	use crate::mock::{
-		genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, HeaderBuilder, TestRuntime,
-		GAS_LIMIT,
+	use crate::{
+		finality::FinalityAncestor,
+		mock::{
+			genesis, insert_header, run_test, run_test_with_genesis, validators_addresses,
+			HeaderBuilder, TestRuntime, GAS_LIMIT,
+		},
+		test_utils::validator_utils::*,
 	};
-	use crate::test_utils::validator_utils::*;
 	use bp_eth_poa::compute_merkle_root;
 
 	const TOTAL_VALIDATORS: usize = 3;
@@ -1182,10 +1199,7 @@ pub(crate) mod tests {
 			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&5).unwrap().len(), 5);
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 5,
-					oldest_block_to_keep: 5,
-				},
+				PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 },
 			);
 		});
 	}
@@ -1202,10 +1216,7 @@ pub(crate) mod tests {
 			storage.prune_blocks(0xFFFF, 10, 3);
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 5,
-					oldest_block_to_keep: 5,
-				},
+				PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 },
 			);
 		});
 	}
@@ -1221,10 +1232,7 @@ pub(crate) mod tests {
 			assert!(HeadersByNumber::<TestRuntime, ()>::get(&3).is_some());
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 0,
-					oldest_block_to_keep: 10,
-				},
+				PruningRange { oldest_unpruned_block: 0, oldest_block_to_keep: 10 },
 			);
 		});
 	}
@@ -1242,10 +1250,7 @@ pub(crate) mod tests {
 			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&2).unwrap().len(), 4);
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 2,
-					oldest_block_to_keep: 10,
-				},
+				PruningRange { oldest_unpruned_block: 2, oldest_block_to_keep: 10 },
 			);
 
 			// try to prune blocks [2; 10)
@@ -1258,10 +1263,7 @@ pub(crate) mod tests {
 			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&4).unwrap().len(), 3);
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 4,
-					oldest_block_to_keep: 10,
-				},
+				PruningRange { oldest_unpruned_block: 4, oldest_block_to_keep: 10 },
 			);
 		});
 	}
@@ -1284,10 +1286,7 @@ pub(crate) mod tests {
 			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&7).unwrap().len(), 5);
 			assert_eq!(
 				BlocksToPrune::<TestRuntime, ()>::get(),
-				PruningRange {
-					oldest_unpruned_block: 7,
-					oldest_block_to_keep: 10,
-				},
+				PruningRange { oldest_unpruned_block: 7, oldest_block_to_keep: 10 },
 			);
 		});
 	}
@@ -1307,7 +1306,8 @@ pub(crate) mod tests {
 			}
 
 			// for header with number = interval, cache entry is created
-			let header_with_entry = HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators);
+			let header_with_entry =
+				HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators);
 			let header_with_entry_hash = header_with_entry.compute_hash();
 			insert_header(&mut storage, header_with_entry);
 			assert!(FinalityCache::<TestRuntime>::get(&header_with_entry_hash).is_some());
@@ -1354,10 +1354,7 @@ pub(crate) mod tests {
 			let votes_at_3 = FinalityVotes {
 				votes: vec![([42; 20].into(), 21)].into_iter().collect(),
 				ancestry: vec![FinalityAncestor {
-					id: HeaderId {
-						number: 100,
-						hash: Default::default(),
-					},
+					id: HeaderId { number: 100, hash: Default::default() },
 					..Default::default()
 				}]
 				.into_iter()
diff --git a/bridges/modules/ethereum/src/mock.rs b/bridges/modules/ethereum/src/mock.rs
index 00c10d3cc3e0f8af7850f614f4be1f35c5cd8b7c..e0439444020c7c04db82dc7be5285c359ff2ae4b 100644
--- a/bridges/modules/ethereum/src/mock.rs
+++ b/bridges/modules/ethereum/src/mock.rs
@@ -17,11 +17,15 @@
 // From construct_runtime macro
 #![allow(clippy::from_over_into)]
 
-pub use crate::test_utils::{insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT};
+pub use crate::test_utils::{
+	insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT,
+};
 pub use bp_eth_poa::signatures::secret_to_address;
 
-use crate::validators::{ValidatorsConfiguration, ValidatorsSource};
-use crate::{AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy};
+use crate::{
+	validators::{ValidatorsConfiguration, ValidatorsSource},
+	AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy,
+};
 use bp_eth_poa::{Address, AuraHeader, H256, U256};
 use frame_support::{parameter_types, traits::GenesisBuild, weights::Weight};
 use secp256k1::SecretKey;
@@ -154,14 +158,7 @@ pub fn run_test_with_genesis<T>(
 		})
 		.unwrap(),
 	)
-	.execute_with(|| {
-		test(TestContext {
-			genesis,
-			total_validators,
-			validators,
-			addresses,
-		})
-	})
+	.execute_with(|| test(TestContext { genesis, total_validators, validators, addresses }))
 }
 
 /// Pruning strategy that keeps 10 headers behind best block.
diff --git a/bridges/modules/ethereum/src/test_utils.rs b/bridges/modules/ethereum/src/test_utils.rs
index bc41b2286f9e50d883b73b2dc8127dd06570862b..343c7b1cb3646f4dff829f3a6dcc46454c02395d 100644
--- a/bridges/modules/ethereum/src/test_utils.rs
+++ b/bridges/modules/ethereum/src/test_utils.rs
@@ -24,10 +24,10 @@
 // Since this is test code it's fine that not everything is used
 #![allow(dead_code)]
 
-use crate::finality::FinalityVotes;
-use crate::validators::CHANGE_EVENT_HASH;
-use crate::verification::calculate_score;
-use crate::{Config, HeaderToImport, Storage};
+use crate::{
+	finality::FinalityVotes, validators::CHANGE_EVENT_HASH, verification::calculate_score, Config,
+	HeaderToImport, Storage,
+};
 
 use bp_eth_poa::{
 	rlp_encode,
@@ -130,10 +130,7 @@ impl HeaderBuilder {
 		let sealed_empty_steps = empty_steps
 			.iter()
 			.map(|(author, step)| {
-				let mut empty_step = SealedEmptyStep {
-					step: *step,
-					signature: Default::default(),
-				};
+				let mut empty_step = SealedEmptyStep { step: *step, signature: Default::default() };
 				let message = empty_step.message(&self.header.parent_hash);
 				let signature: [u8; 65] = sign(author, message).into();
 				empty_step.signature = signature.into();
@@ -216,7 +213,11 @@ pub fn build_genesis_header(author: &SecretKey) -> AuraHeader {
 }
 
 /// Helper function for building a custom child header which has been signed by an authority.
-pub fn build_custom_header<F>(author: &SecretKey, previous: &AuraHeader, customize_header: F) -> AuraHeader
+pub fn build_custom_header<F>(
+	author: &SecretKey,
+	previous: &AuraHeader,
+	customize_header: F,
+) -> AuraHeader
 where
 	F: FnOnce(AuraHeader) -> AuraHeader,
 {
@@ -232,7 +233,8 @@ pub fn insert_header<S: Storage>(storage: &mut S, header: AuraHeader) {
 	let id = header.compute_id();
 	let best_finalized = storage.finalized_block();
 	let import_context = storage.import_context(None, &header.parent_hash).unwrap();
-	let parent_finality_votes = storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false);
+	let parent_finality_votes =
+		storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false);
 	let finality_votes = crate::finality::prepare_votes(
 		parent_finality_votes,
 		best_finalized,
@@ -284,9 +286,10 @@ pub fn validators_change_receipt(parent_hash: H256) -> Receipt {
 			address: [3; 20].into(),
 			topics: vec![CHANGE_EVENT_HASH.into(), parent_hash],
 			data: vec![
-				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7,
-				7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+				7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
 			],
 		}],
 	}
diff --git a/bridges/modules/ethereum/src/validators.rs b/bridges/modules/ethereum/src/validators.rs
index e8102a527b2888acec9c702bd4ff581fd89829b6..020e6fa7961269725f4d89678f319eb1e9cd6a19 100644
--- a/bridges/modules/ethereum/src/validators.rs
+++ b/bridges/modules/ethereum/src/validators.rs
@@ -14,15 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::{ChangeToEnact, Storage};
+use crate::{error::Error, ChangeToEnact, Storage};
 use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256};
 use sp_std::prelude::*;
 
 /// The hash of InitiateChange event of the validators set contract.
 pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[
-	0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28,
-	0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89,
+	0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f,
+	0xd2, 0xc2, 0x28, 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89,
 ];
 
 /// Where source of validators addresses come from. This covers the chain lifetime.
@@ -104,7 +103,8 @@ impl<'a> Validators<'a> {
 		if next_starts_at == header.number {
 			match *next_source {
 				ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))),
-				ValidatorsSource::Contract(_, ref new_list) => return Ok((Some(new_list.clone()), None)),
+				ValidatorsSource::Contract(_, ref new_list) =>
+					return Ok((Some(new_list.clone()), None)),
 			}
 		}
 
@@ -128,12 +128,12 @@ impl<'a> Validators<'a> {
 		.bloom();
 
 		if !header.log_bloom.contains(&expected_bloom) {
-			return Ok((None, None));
+			return Ok((None, None))
 		}
 
 		let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?;
 		if header.check_receipts_root(&receipts).is_err() {
-			return Err(Error::TransactionsReceiptsMismatch);
+			return Err(Error::TransactionsReceiptsMismatch)
 		}
 
 		// iterate in reverse because only the _last_ change in a given
@@ -145,24 +145,24 @@ impl<'a> Validators<'a> {
 				.filter(|r| r.log_bloom.contains(&expected_bloom))
 				.flat_map(|r| r.logs.iter())
 				.filter(|l| {
-					l.address == *contract_address
-						&& l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH
-						&& l.topics[1] == header.parent_hash
+					l.address == *contract_address &&
+						l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH &&
+						l.topics[1] == header.parent_hash
 				})
 				.filter_map(|l| {
 					let data_len = l.data.len();
 					if data_len < 64 {
-						return None;
+						return None
 					}
 
 					let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]);
 					let new_validators_len = new_validators_len_u256.low_u64();
 					if new_validators_len_u256 != new_validators_len.into() {
-						return None;
+						return None
 					}
 
 					if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) {
-						return None;
+						return None
 					}
 
 					Some(
@@ -216,12 +216,10 @@ impl<'a> Validators<'a> {
 				}
 			})
 			.and_then(|signal_block| {
-				storage
-					.scheduled_change(&signal_block.hash)
-					.map(|change| ChangeToEnact {
-						signal_block: Some(signal_block),
-						validators: change.validators,
-					})
+				storage.scheduled_change(&signal_block.hash).map(|change| ChangeToEnact {
+					signal_block: Some(signal_block),
+					validators: change.validators,
+				})
 			})
 	}
 
@@ -243,7 +241,11 @@ impl<'a> Validators<'a> {
 	}
 
 	/// Returns source of validators that should author the next header.
-	fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) {
+	fn source_at_next_header(
+		&self,
+		header_source_index: usize,
+		header_number: u64,
+	) -> (u64, &ValidatorsSource) {
 		match self.config {
 			ValidatorsConfiguration::Single(ref source) => (0, source),
 			ValidatorsConfiguration::Multi(ref sources) => {
@@ -251,13 +253,13 @@ impl<'a> Validators<'a> {
 				if next_source_index < sources.len() {
 					let next_source = &sources[next_source_index];
 					if next_source.0 < header_number + 1 {
-						return (next_source.0, &next_source.1);
+						return (next_source.0, &next_source.1)
 					}
 				}
 
 				let source = &sources[header_source_index];
 				(source.0, &source.1)
-			}
+			},
 		}
 	}
 }
@@ -275,8 +277,10 @@ impl ValidatorsSource {
 #[cfg(test)]
 pub(crate) mod tests {
 	use super::*;
-	use crate::mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime};
-	use crate::{AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader};
+	use crate::{
+		mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime},
+		AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader,
+	};
 	use bp_eth_poa::compute_merkle_root;
 
 	const TOTAL_VALIDATORS: usize = 3;
@@ -290,10 +294,7 @@ pub(crate) mod tests {
 		]);
 		let validators = Validators::new(&config);
 
-		assert_eq!(
-			validators.source_at(99),
-			(0, 0, &ValidatorsSource::List(vec![[1; 20].into()])),
-		);
+		assert_eq!(validators.source_at(99), (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])),);
 		assert_eq!(
 			validators.source_at_next_header(0, 99),
 			(0, &ValidatorsSource::List(vec![[1; 20].into()])),
@@ -321,12 +322,12 @@ pub(crate) mod tests {
 	#[test]
 	fn maybe_signals_validators_change_works() {
 		// when contract is active, but bloom has no required bits set
-		let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new()));
+		let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+			Default::default(),
+			Vec::new(),
+		));
 		let validators = Validators::new(&config);
-		let mut header = AuraHeader {
-			number: u64::max_value(),
-			..Default::default()
-		};
+		let mut header = AuraHeader { number: u64::max_value(), ..Default::default() };
 		assert!(!validators.maybe_signals_validators_change(&header));
 
 		// when contract is active and bloom has required bits set
@@ -347,10 +348,7 @@ pub(crate) mod tests {
 			(200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])),
 		]);
 		let validators = Validators::new(&config);
-		let mut header = AuraHeader {
-			number: 100,
-			..Default::default()
-		};
+		let mut header = AuraHeader { number: 100, ..Default::default() };
 
 		// when we're at the block that switches to list source
 		assert_eq!(
@@ -406,26 +404,20 @@ pub(crate) mod tests {
 
 	fn try_finalize_with_scheduled_change(scheduled_at: Option<HeaderId>) -> Option<ChangeToEnact> {
 		run_test(TOTAL_VALIDATORS, |_| {
-			let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new()));
+			let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+				Default::default(),
+				Vec::new(),
+			));
 			let validators = Validators::new(&config);
 			let storage = BridgeStorage::<TestRuntime>::new();
 
 			// when we're finailizing blocks 10...100
-			let id10 = HeaderId {
-				number: 10,
-				hash: [10; 32].into(),
-			};
-			let id100 = HeaderId {
-				number: 100,
-				hash: [100; 32].into(),
-			};
+			let id10 = HeaderId { number: 10, hash: [10; 32].into() };
+			let id100 = HeaderId { number: 100, hash: [100; 32].into() };
 			let finalized_blocks = vec![(id10, None), (id100, None)];
 			let header100 = StoredHeader::<u64> {
 				submitter: None,
-				header: AuraHeader {
-					number: 100,
-					..Default::default()
-				},
+				header: AuraHeader { number: 100, ..Default::default() },
 				total_difficulty: 0.into(),
 				next_validators_set_id: 0,
 				last_signal_block: scheduled_at,
@@ -445,16 +437,10 @@ pub(crate) mod tests {
 
 	#[test]
 	fn finalize_validators_change_finalizes_scheduled_change() {
-		let id50 = HeaderId {
-			number: 50,
-			..Default::default()
-		};
+		let id50 = HeaderId { number: 50, ..Default::default() };
 		assert_eq!(
 			try_finalize_with_scheduled_change(Some(id50)),
-			Some(ChangeToEnact {
-				signal_block: Some(id50),
-				validators: validators_addresses(1),
-			}),
+			Some(ChangeToEnact { signal_block: Some(id50), validators: validators_addresses(1) }),
 		);
 	}
 
@@ -465,10 +451,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() {
-		let id5 = HeaderId {
-			number: 5,
-			..Default::default()
-		};
+		let id5 = HeaderId { number: 5, ..Default::default() };
 		assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None,);
 	}
 }
diff --git a/bridges/modules/ethereum/src/verification.rs b/bridges/modules/ethereum/src/verification.rs
index 06aa4906fb4d600947dd07a9aa8871136b5d90d6..34ae9ad3154325a4720c2d1c8800e20ec75b742e 100644
--- a/bridges/modules/ethereum/src/verification.rs
+++ b/bridges/modules/ethereum/src/verification.rs
@@ -14,11 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::validators::{Validators, ValidatorsConfiguration};
-use crate::{AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage};
+use crate::{
+	error::Error,
+	validators::{Validators, ValidatorsConfiguration},
+	AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage,
+};
 use bp_eth_poa::{
-	public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256,
+	public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep,
+	H256, H520, U128, U256,
 };
 use codec::Encode;
 use sp_io::crypto::secp256k1_ecdsa_recover;
@@ -28,16 +31,19 @@ use sp_std::{vec, vec::Vec};
 /// Pre-check to see if should try and import this header.
 /// Returns error if we should not try to import this block.
 /// Returns ID of passed header and best finalized header.
-pub fn is_importable_header<S: Storage>(storage: &S, header: &AuraHeader) -> Result<(HeaderId, HeaderId), Error> {
+pub fn is_importable_header<S: Storage>(
+	storage: &S,
+	header: &AuraHeader,
+) -> Result<(HeaderId, HeaderId), Error> {
 	// we never import any header that competes with finalized header
 	let finalized_id = storage.finalized_block();
 	if header.number <= finalized_id.number {
-		return Err(Error::AncientHeader);
+		return Err(Error::AncientHeader)
 	}
 	// we never import any header with known hash
 	let id = header.compute_id();
 	if storage.header(&id.hash).is_some() {
-		return Err(Error::KnownHeader);
+		return Err(Error::KnownHeader)
 	}
 
 	Ok((id, finalized_id))
@@ -64,7 +70,8 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 	// we want to avoid having same headers twice in the pool
 	// => we're strict about receipts here - if we need them, we require receipts to be Some,
 	// otherwise we require receipts to be None
-	let receipts_required = Validators::new(validators_config).maybe_signals_validators_change(header);
+	let receipts_required =
+		Validators::new(validators_config).maybe_signals_validators_change(header);
 	match (receipts_required, receipts.is_some()) {
 		(true, false) => return Err(Error::MissingTransactionsReceipts),
 		(false, true) => return Err(Error::RedundantTransactionsReceipts),
@@ -78,7 +85,7 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 	let (best_id, _) = storage.best_block();
 	let difference = header.number.saturating_sub(best_id.number);
 	if difference > pool_config.max_future_number_difference {
-		return Err(Error::UnsignedTooFarInTheFuture);
+		return Err(Error::UnsignedTooFarInTheFuture)
 	}
 
 	// TODO: only accept new headers when we're at the tip of PoA chain
@@ -104,11 +111,8 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 
 			// since our parent is already in the storage, we do not require it
 			// to be in the transaction pool
-			(
-				vec![],
-				vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag],
-			)
-		}
+			(vec![], vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag])
+		},
 		None => {
 			// we know nothing about parent header
 			// => the best thing we can do is to believe that there are no forks in
@@ -119,34 +123,37 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 				"import context is None only when header is missing from the storage;\
 							best header is always in the storage; qed",
 			);
-			let validators_check_result =
-				validator_checks(config, &best_context.validators_set().validators, header, header_step);
+			let validators_check_result = validator_checks(
+				config,
+				&best_context.validators_set().validators,
+				header,
+				header_step,
+			);
 			if let Err(error) = validators_check_result {
-				find_next_validators_signal(storage, &best_context)
-					.ok_or(error)
-					.and_then(|next_validators| validator_checks(config, &next_validators, header, header_step))?;
+				find_next_validators_signal(storage, &best_context).ok_or(error).and_then(
+					|next_validators| {
+						validator_checks(config, &next_validators, header, header_step)
+					},
+				)?;
 			}
 
 			// since our parent is missing from the storage, we **DO** require it
 			// to be in the transaction pool
 			// (- 1 can't underflow because there's always best block in the header)
-			let requires_header_number_and_hash_tag = HeaderId {
-				number: header.number - 1,
-				hash: header.parent_hash,
-			}
-			.encode();
+			let requires_header_number_and_hash_tag =
+				HeaderId { number: header.number - 1, hash: header.parent_hash }.encode();
 			(
 				vec![requires_header_number_and_hash_tag],
 				vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag],
 			)
-		}
+		},
 	};
 
 	// the heaviest, but rare operation - we do not want invalid receipts in the pool
 	if let Some(receipts) = receipts {
 		log::trace!(target: "runtime", "Got receipts! {:?}", receipts);
 		if header.check_receipts_root(receipts).is_err() {
-			return Err(Error::TransactionsReceiptsMismatch);
+			return Err(Error::TransactionsReceiptsMismatch)
 		}
 	}
 
@@ -189,32 +196,32 @@ fn contextless_checks<CT: ChainTime>(
 ) -> Result<(), Error> {
 	let expected_seal_fields = expected_header_seal_fields(config, header);
 	if header.seal.len() != expected_seal_fields {
-		return Err(Error::InvalidSealArity);
+		return Err(Error::InvalidSealArity)
 	}
 	if header.number >= u64::max_value() {
-		return Err(Error::RidiculousNumber);
+		return Err(Error::RidiculousNumber)
 	}
 	if header.gas_used > header.gas_limit {
-		return Err(Error::TooMuchGasUsed);
+		return Err(Error::TooMuchGasUsed)
 	}
 	if header.gas_limit < config.min_gas_limit {
-		return Err(Error::InvalidGasLimit);
+		return Err(Error::InvalidGasLimit)
 	}
 	if header.gas_limit > config.max_gas_limit {
-		return Err(Error::InvalidGasLimit);
+		return Err(Error::InvalidGasLimit)
 	}
 	if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size {
-		return Err(Error::ExtraDataOutOfBounds);
+		return Err(Error::ExtraDataOutOfBounds)
 	}
 
 	// we can't detect if block is from future in runtime
 	// => let's only do an overflow check
 	if header.timestamp > i32::max_value() as u64 {
-		return Err(Error::TimestampOverflow);
+		return Err(Error::TimestampOverflow)
 	}
 
 	if chain_time.is_timestamp_ahead(header.timestamp) {
-		return Err(Error::HeaderTimestampIsAhead);
+		return Err(Error::HeaderTimestampIsAhead)
 	}
 
 	Ok(())
@@ -233,15 +240,16 @@ fn contextual_checks<Submitter>(
 
 	// Ensure header is from the step after context.
 	if header_step == parent_step {
-		return Err(Error::DoubleVote);
+		return Err(Error::DoubleVote)
 	}
 	#[allow(clippy::suspicious_operation_groupings)]
 	if header.number >= config.validate_step_transition && header_step < parent_step {
-		return Err(Error::DoubleVote);
+		return Err(Error::DoubleVote)
 	}
 
-	// If empty step messages are enabled we will validate the messages in the seal, missing messages are not
-	// reported as there's no way to tell whether the empty step message was never sent or simply not included.
+	// If empty step messages are enabled we will validate the messages in the seal, missing
+	// messages are not reported as there's no way to tell whether the empty step message was never
+	// sent or simply not included.
 	let empty_steps_len = match header.number >= config.empty_steps_transition {
 		true => {
 			let strict_empty_steps = header.number >= config.strict_empty_steps_transition;
@@ -251,16 +259,16 @@ fn contextual_checks<Submitter>(
 
 			for empty_step in empty_steps {
 				if empty_step.step <= parent_step || empty_step.step >= header_step {
-					return Err(Error::InsufficientProof);
+					return Err(Error::InsufficientProof)
 				}
 
 				if !verify_empty_step(&header.parent_hash, &empty_step, validators) {
-					return Err(Error::InsufficientProof);
+					return Err(Error::InsufficientProof)
 				}
 
 				if strict_empty_steps {
 					if empty_step.step <= prev_empty_step {
-						return Err(Error::InsufficientProof);
+						return Err(Error::InsufficientProof)
 					}
 
 					prev_empty_step = empty_step.step;
@@ -268,7 +276,7 @@ fn contextual_checks<Submitter>(
 			}
 
 			empty_steps_len
-		}
+		},
 		false => 0,
 	};
 
@@ -276,7 +284,7 @@ fn contextual_checks<Submitter>(
 	if header.number >= config.validate_score_transition {
 		let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _);
 		if header.difficulty != expected_difficulty {
-			return Err(Error::InvalidDifficulty);
+			return Err(Error::InvalidDifficulty)
 		}
 	}
 
@@ -292,16 +300,17 @@ fn validator_checks(
 ) -> Result<(), Error> {
 	let expected_validator = *step_validator(validators, header_step);
 	if header.author != expected_validator {
-		return Err(Error::NotValidator);
+		return Err(Error::NotValidator)
 	}
 
 	let validator_signature = header.signature().ok_or(Error::MissingSignature)?;
 	let header_seal_hash = header
 		.seal_hash(header.number >= config.empty_steps_transition)
 		.ok_or(Error::MissingEmptySteps)?;
-	let is_invalid_proposer = !verify_signature(&expected_validator, &validator_signature, &header_seal_hash);
+	let is_invalid_proposer =
+		!verify_signature(&expected_validator, &validator_signature, &header_seal_hash);
 	if is_invalid_proposer {
-		return Err(Error::NotValidator);
+		return Err(Error::NotValidator)
 	}
 
 	Ok(())
@@ -324,8 +333,13 @@ fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[A
 }
 
 /// Chain scoring: total weight is sqrt(U256::max_value())*height - step
-pub(crate) fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 {
-	U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps)
+pub(crate) fn calculate_score(
+	parent_step: u64,
+	current_step: u64,
+	current_empty_steps: usize,
+) -> U256 {
+	U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) +
+		U256::from(current_empty_steps)
 }
 
 /// Verify that the signature over message has been produced by given validator.
@@ -337,7 +351,10 @@ fn verify_signature(expected_validator: &Address, signature: &H520, message: &H2
 }
 
 /// Find next unfinalized validators set change after finalized set.
-fn find_next_validators_signal<S: Storage>(storage: &S, context: &ImportContext<S::Submitter>) -> Option<Vec<Address>> {
+fn find_next_validators_signal<S: Storage>(
+	storage: &S,
+	context: &ImportContext<S::Submitter>,
+) -> Option<Vec<Address>> {
 	// that's the earliest block number we may met in following loop
 	// it may be None if that's the first set
 	let best_set_signal_block = context.validators_set().signal_block;
@@ -352,14 +369,15 @@ fn find_next_validators_signal<S: Storage>(storage: &S, context: &ImportContext<
 		// next_current_block_hash points to the block that schedules next
 		// change
 		let current_scheduled_set = match current_set_signal_block {
-			Some(current_set_signal_block) if Some(&current_set_signal_block) == best_set_signal_block.as_ref() => {
-				return next_scheduled_set.map(|scheduled_set| scheduled_set.validators)
-			}
+			Some(current_set_signal_block)
+				if Some(&current_set_signal_block) == best_set_signal_block.as_ref() =>
+				return next_scheduled_set.map(|scheduled_set| scheduled_set.validators),
 			None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators),
-			Some(current_set_signal_block) => storage.scheduled_change(&current_set_signal_block.hash).expect(
-				"header that is associated with this change is not pruned;\
+			Some(current_set_signal_block) =>
+				storage.scheduled_change(&current_set_signal_block.hash).expect(
+					"header that is associated with this change is not pruned;\
 					scheduled changes are only removed when header is pruned; qed",
-			),
+				),
 		};
 
 		current_set_signal_block = current_scheduled_set.prev_signal_block;
@@ -370,13 +388,15 @@ fn find_next_validators_signal<S: Storage>(storage: &S, context: &ImportContext<
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{
-		insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, validators_addresses,
-		validators_change_receipt, AccountId, ConstChainTime, HeaderBuilder, TestRuntime, GAS_LIMIT,
-	};
-	use crate::validators::ValidatorsSource;
 	use crate::{
-		pool_configuration, BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId,
+		mock::{
+			insert_header, run_test_with_genesis, test_aura_config, validator, validator_address,
+			validators_addresses, validators_change_receipt, AccountId, ConstChainTime,
+			HeaderBuilder, TestRuntime, GAS_LIMIT,
+		},
+		pool_configuration,
+		validators::ValidatorsSource,
+		BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId,
 		ScheduledChanges, ValidatorsSet, ValidatorsSets,
 	};
 	use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256};
@@ -391,7 +411,10 @@ mod tests {
 		HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0))
 	}
 
-	fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result<ImportContext<AccountId>, Error> {
+	fn verify_with_config(
+		config: &AuraConfiguration,
+		header: &AuraHeader,
+	) -> Result<ImportContext<AccountId>, Error> {
 		run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| {
 			let storage = BridgeStorage::<TestRuntime>::new();
 			verify_aura_header(&storage, config, None, header, &ConstChainTime::default())
@@ -418,8 +441,10 @@ mod tests {
 
 			FinalizedBlock::<TestRuntime, ()>::put(block2_id);
 
-			let validators_config =
-				ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new()));
+			let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+				Default::default(),
+				Vec::new(),
+			));
 			let (header, receipts) = make_header(&validators);
 			accept_aura_header_into_pool(
 				&storage,
@@ -433,7 +458,11 @@ mod tests {
 		})
 	}
 
-	fn change_validators_set_at(number: u64, finalized_set: Vec<Address>, signalled_set: Option<Vec<Address>>) {
+	fn change_validators_set_at(
+		number: u64,
+		finalized_set: Vec<Address>,
+		signalled_set: Option<Vec<Address>>,
+	) {
 		let set_id = NextValidatorsSetId::<TestRuntime, ()>::get();
 		NextValidatorsSetId::<TestRuntime, ()>::put(set_id + 1);
 		ValidatorsSets::<TestRuntime, ()>::insert(
@@ -458,10 +487,7 @@ mod tests {
 			});
 			ScheduledChanges::<TestRuntime, ()>::insert(
 				header.header.parent_hash,
-				AuraScheduledChange {
-					validators: signalled_set,
-					prev_signal_block: None,
-				},
+				AuraScheduledChange { validators: signalled_set, prev_signal_block: None },
 			);
 		}
 
@@ -520,21 +546,15 @@ mod tests {
 		config.max_gas_limit = 200.into();
 
 		// when limit is lower than expected
-		let header = HeaderBuilder::with_number(1)
-			.gas_limit(50.into())
-			.sign_by(&validator(0));
+		let header = HeaderBuilder::with_number(1).gas_limit(50.into()).sign_by(&validator(0));
 		assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
 
 		// when limit is larger than expected
-		let header = HeaderBuilder::with_number(1)
-			.gas_limit(250.into())
-			.sign_by(&validator(0));
+		let header = HeaderBuilder::with_number(1).gas_limit(250.into()).sign_by(&validator(0));
 		assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
 
 		// when limit is within expected range
-		let header = HeaderBuilder::with_number(1)
-			.gas_limit(150.into())
-			.sign_by(&validator(0));
+		let header = HeaderBuilder::with_number(1).gas_limit(150.into()).sign_by(&validator(0));
 		assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
 	}
 
@@ -573,7 +593,8 @@ mod tests {
 		// expected import context after verification
 		let expect = ImportContext::<AccountId> {
 			submitter: None,
-			parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(),
+			parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3")
+				.into(),
 			parent_header: genesis(),
 			parent_total_difficulty: U256::zero(),
 			parent_scheduled_change: None,
@@ -587,7 +608,8 @@ mod tests {
 				signal_block: None,
 				enact_block: HeaderId {
 					number: 0,
-					hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(),
+					hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3")
+						.into(),
 				},
 			},
 			last_signal_block: None,
@@ -729,7 +751,10 @@ mod tests {
 	fn pool_verifies_known_blocks() {
 		// when header is known
 		assert_eq!(
-			default_accept_into_pool(|validators| (HeaderBuilder::with_parent_number(2).sign_by_set(validators), None)),
+			default_accept_into_pool(|validators| (
+				HeaderBuilder::with_parent_number(2).sign_by_set(validators),
+				None
+			)),
 			Err(Error::KnownHeader),
 		);
 	}
@@ -785,7 +810,10 @@ mod tests {
 	fn pool_verifies_future_block_number() {
 		// when header is too far from the future
 		assert_eq!(
-			default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(validators), None),),
+			default_accept_into_pool(|validators| (
+				HeaderBuilder::with_number(100).sign_by_set(validators),
+				None
+			),),
 			Err(Error::UnsignedTooFarInTheFuture),
 		);
 	}
@@ -811,7 +839,10 @@ mod tests {
 		// (even if header will be considered invalid/duplicate later, we can use this signature
 		// as a proof of malicious action by this validator)
 		assert_eq!(
-			default_accept_into_pool(|_| (HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), None,)),
+			default_accept_into_pool(|_| (
+				HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)),
+				None,
+			)),
 			Err(Error::NotValidator),
 		);
 	}
@@ -829,10 +860,7 @@ mod tests {
 				// no tags are required
 				vec![],
 				// header provides two tags
-				vec![
-					(4u64, validators_addresses(3)[1]).encode(),
-					(4u64, hash.unwrap()).encode(),
-				],
+				vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),],
 			)),
 		);
 	}
@@ -843,9 +871,8 @@ mod tests {
 		let mut parent_id = None;
 		assert_eq!(
 			default_accept_into_pool(|validators| {
-				let header = HeaderBuilder::with_number(5)
-					.step(GENESIS_STEP + 5)
-					.sign_by_set(validators);
+				let header =
+					HeaderBuilder::with_number(5).step(GENESIS_STEP + 5).sign_by_set(validators);
 				id = Some(header.compute_id());
 				parent_id = header.parent_id();
 				(header, None)
@@ -881,7 +908,11 @@ mod tests {
 		assert_eq!(
 			default_accept_into_pool(|actual_validators| {
 				// change finalized set at parent header + signal valid set at parent block
-				change_validators_set_at(3, validators_addresses(10), Some(validators_addresses(3)));
+				change_validators_set_at(
+					3,
+					validators_addresses(10),
+					Some(validators_addresses(3)),
+				);
 
 				// header is signed using wrong set
 				let header = HeaderBuilder::with_number(5)
@@ -933,10 +964,7 @@ mod tests {
 				// no tags are required
 				vec![],
 				// header provides two tags
-				vec![
-					(4u64, validators_addresses(3)[1]).encode(),
-					(4u64, hash.unwrap()).encode(),
-				],
+				vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),],
 			)),
 		);
 	}
diff --git a/bridges/modules/grandpa/src/benchmarking.rs b/bridges/modules/grandpa/src/benchmarking.rs
index 1714024928dc56faf47399b240674a090d3484dc..46e1e41a87028bd18dd7ce03a3862e00cd518d6f 100644
--- a/bridges/modules/grandpa/src/benchmarking.rs
+++ b/bridges/modules/grandpa/src/benchmarking.rs
@@ -42,7 +42,8 @@
 use crate::*;
 
 use bp_test_utils::{
-	accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID,
+	accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND,
+	TEST_GRANDPA_SET_ID,
 };
 use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller};
 use frame_support::traits::Get;
diff --git a/bridges/modules/grandpa/src/lib.rs b/bridges/modules/grandpa/src/lib.rs
index f1599a5ebfbaf3fbf468211b57430ec569501961..279e23404a059f74141423a492393b074865190a 100644
--- a/bridges/modules/grandpa/src/lib.rs
+++ b/bridges/modules/grandpa/src/lib.rs
@@ -38,8 +38,7 @@
 
 use crate::weights::WeightInfo;
 
-use bp_header_chain::justification::GrandpaJustification;
-use bp_header_chain::InitializationData;
+use bp_header_chain::{justification::GrandpaJustification, InitializationData};
 use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf};
 use finality_grandpa::voter_set::VoterSet;
 use frame_support::{ensure, fail};
@@ -136,10 +135,7 @@ pub mod pallet {
 			ensure_operational::<T, I>()?;
 			let _ = ensure_signed(origin)?;
 
-			ensure!(
-				Self::request_count() < T::MaxRequests::get(),
-				<Error<T, I>>::TooManyRequests
-			);
+			ensure!(Self::request_count() < T::MaxRequests::get(), <Error<T, I>>::TooManyRequests);
 
 			let (hash, number) = (finality_target.hash(), finality_target.number());
 			log::trace!(target: "runtime::bridge-grandpa", "Going to try and finalize header {:?}", finality_target);
@@ -153,27 +149,29 @@ pub mod pallet {
 						finality_target,
 					);
 					fail!(<Error<T, I>>::NotInitialized);
-				}
+				},
 			};
 
 			// We do a quick check here to ensure that our header chain is making progress and isn't
-			// "travelling back in time" (which could be indicative of something bad, e.g a hard-fork).
+			// "travelling back in time" (which could be indicative of something bad, e.g. a
+			// hard-fork).
 			ensure!(best_finalized.number() < number, <Error<T, I>>::OldHeader);
 
 			let authority_set = <CurrentAuthoritySet<T, I>>::get();
 			let set_id = authority_set.set_id;
 			verify_justification::<T, I>(&justification, hash, *number, authority_set)?;
 
-			let is_authorities_change_enacted = try_enact_authority_change::<T, I>(&finality_target, set_id)?;
+			let is_authorities_change_enacted =
+				try_enact_authority_change::<T, I>(&finality_target, set_id)?;
 			<RequestCount<T, I>>::mutate(|count| *count += 1);
 			insert_header::<T, I>(*finality_target, hash);
 			log::info!(target: "runtime::bridge-grandpa", "Succesfully imported finalized header with hash {:?}!", hash);
 
-			// mandatory header is a header that changes authorities set. The pallet can't go further
-			// without importing this header. So every bridge MUST import mandatory headers.
+			// mandatory header is a header that changes authorities set. The pallet can't go
+			// further without importing this header. So every bridge MUST import mandatory headers.
 			//
-			// We don't want to charge extra costs for mandatory operations. So relayer is not paying
-			// fee for mandatory headers import transactions.
+			// We don't want to charge extra costs for mandatory operations. So relayer is not
+			// paying fee for mandatory headers import transactions.
 			let is_mandatory_header = is_authorities_change_enacted;
 			let pays_fee = if is_mandatory_header { Pays::No } else { Pays::Yes };
 
@@ -183,8 +181,8 @@ pub mod pallet {
 		/// Bootstrap the bridge pallet with an initial header and authority set from which to sync.
 		///
 		/// The initial configuration provided does not need to be the genesis header of the bridged
-		/// chain, it can be any arbitrary header. You can also provide the next scheduled set change
-		/// if it is already know.
+		/// chain, it can be any arbitrary header. You can also provide the next scheduled set
+		/// change if it is already known.
 		///
 		/// This function is only allowed to be called from a trusted origin and writes to storage
 		/// with practically no checks in terms of the validity of the data. It is important that
@@ -213,17 +211,20 @@ pub mod pallet {
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
-		pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResultWithPostInfo {
+		pub fn set_owner(
+			origin: OriginFor<T>,
+			new_owner: Option<T::AccountId>,
+		) -> DispatchResultWithPostInfo {
 			ensure_owner_or_root::<T, I>(origin)?;
 			match new_owner {
 				Some(new_owner) => {
 					PalletOwner::<T, I>::put(&new_owner);
 					log::info!(target: "runtime::bridge-grandpa", "Setting pallet Owner to: {:?}", new_owner);
-				}
+				},
 				None => {
 					PalletOwner::<T, I>::kill();
 					log::info!(target: "runtime::bridge-grandpa", "Removed Owner of pallet.");
-				}
+				},
 			}
 
 			Ok(().into())
@@ -233,7 +234,10 @@ pub mod pallet {
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
-		pub fn set_operational(origin: OriginFor<T>, operational: bool) -> DispatchResultWithPostInfo {
+		pub fn set_operational(
+			origin: OriginFor<T>,
+			operational: bool,
+		) -> DispatchResultWithPostInfo {
 			ensure_owner_or_root::<T, I>(origin)?;
 			<IsHalted<T, I>>::put(operational);
 
@@ -260,11 +264,13 @@ pub mod pallet {
 
 	/// Hash of the header used to bootstrap the pallet.
 	#[pallet::storage]
-	pub(super) type InitialHash<T: Config<I>, I: 'static = ()> = StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
+	pub(super) type InitialHash<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
 
 	/// Hash of the best finalized header.
 	#[pallet::storage]
-	pub(super) type BestFinalized<T: Config<I>, I: 'static = ()> = StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
+	pub(super) type BestFinalized<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
 
 	/// A ring buffer of imported hashes. Ordered by the insertion time.
 	#[pallet::storage]
@@ -273,7 +279,8 @@ pub mod pallet {
 
 	/// Current ring buffer position.
 	#[pallet::storage]
-	pub(super) type ImportedHashesPointer<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;
+	pub(super) type ImportedHashesPointer<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, u32, ValueQuery>;
 
 	/// Headers which have been imported into the pallet.
 	#[pallet::storage]
@@ -292,7 +299,8 @@ pub mod pallet {
 	/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
 	/// flag directly or call the `halt_operations`).
 	#[pallet::storage]
-	pub(super) type PalletOwner<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>;
+	pub(super) type PalletOwner<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, T::AccountId, OptionQuery>;
 
 	/// If true, all pallet transactions are failed immediately.
 	#[pallet::storage]
@@ -309,10 +317,7 @@ pub mod pallet {
 	#[cfg(feature = "std")]
 	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
 		fn default() -> Self {
-			Self {
-				owner: None,
-				init_data: None,
-			}
+			Self { owner: None, init_data: None }
 		}
 	}
 
@@ -419,29 +424,35 @@ pub mod pallet {
 	) -> Result<(), sp_runtime::DispatchError> {
 		use bp_header_chain::justification::verify_justification;
 
-		let voter_set = VoterSet::new(authority_set.authorities).ok_or(<Error<T, I>>::InvalidAuthoritySet)?;
+		let voter_set =
+			VoterSet::new(authority_set.authorities).ok_or(<Error<T, I>>::InvalidAuthoritySet)?;
 		let set_id = authority_set.set_id;
 
-		Ok(
-			verify_justification::<BridgedHeader<T, I>>((hash, number), set_id, &voter_set, justification).map_err(
-				|e| {
-					log::error!(
-						target: "runtime::bridge-grandpa",
-						"Received invalid justification for {:?}: {:?}",
-						hash,
-						e,
-					);
-					<Error<T, I>>::InvalidJustification
-				},
-			)?,
+		Ok(verify_justification::<BridgedHeader<T, I>>(
+			(hash, number),
+			set_id,
+			&voter_set,
+			justification,
 		)
+		.map_err(|e| {
+			log::error!(
+				target: "runtime::bridge-grandpa",
+				"Received invalid justification for {:?}: {:?}",
+				hash,
+				e,
+			);
+			<Error<T, I>>::InvalidJustification
+		})?)
 	}
 
 	/// Import a previously verified header to the storage.
 	///
 	/// Note this function solely takes care of updating the storage and pruning old entries,
 	/// but does not verify the validity of such import.
-	pub(crate) fn insert_header<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>, hash: BridgedBlockHash<T, I>) {
+	pub(crate) fn insert_header<T: Config<I>, I: 'static>(
+		header: BridgedHeader<T, I>,
+		hash: BridgedBlockHash<T, I>,
+	) {
 		let index = <ImportedHashesPointer<T, I>>::get();
 		let pruning = <ImportedHashes<T, I>>::try_get(index);
 		<BestFinalized<T, I>>::put(hash);
@@ -461,12 +472,7 @@ pub mod pallet {
 	pub(crate) fn initialize_bridge<T: Config<I>, I: 'static>(
 		init_params: super::InitializationData<BridgedHeader<T, I>>,
 	) {
-		let super::InitializationData {
-			header,
-			authority_list,
-			set_id,
-			is_halted,
-		} = init_params;
+		let super::InitializationData { header, authority_list, set_id, is_halted } = init_params;
 
 		let initial_hash = header.hash();
 		<InitialHash<T, I>>::put(initial_hash);
@@ -506,7 +512,9 @@ pub mod pallet {
 	fn ensure_owner_or_root<T: Config<I>, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> {
 		match origin.into() {
 			Ok(RawOrigin::Root) => Ok(()),
-			Ok(RawOrigin::Signed(ref signer)) if Some(signer) == <PalletOwner<T, I>>::get().as_ref() => Ok(()),
+			Ok(RawOrigin::Signed(ref signer))
+				if Some(signer) == <PalletOwner<T, I>>::get().as_ref() =>
+				Ok(()),
 			_ => Err(BadOrigin),
 		}
 	}
@@ -553,14 +561,17 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		parse: impl FnOnce(bp_runtime::StorageProofChecker<BridgedBlockHasher<T, I>>) -> R,
 	) -> Result<R, sp_runtime::DispatchError> {
 		let header = <ImportedHeaders<T, I>>::get(hash).ok_or(Error::<T, I>::UnknownHeader)?;
-		let storage_proof_checker = bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof)
-			.map_err(|_| Error::<T, I>::StorageRootMismatch)?;
+		let storage_proof_checker =
+			bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof)
+				.map_err(|_| Error::<T, I>::StorageRootMismatch)?;
 
 		Ok(parse(storage_proof_checker))
 	}
 }
 
-pub(crate) fn find_scheduled_change<H: HeaderT>(header: &H) -> Option<sp_finality_grandpa::ScheduledChange<H::Number>> {
+pub(crate) fn find_scheduled_change<H: HeaderT>(
+	header: &H,
+) -> Option<sp_finality_grandpa::ScheduledChange<H::Number>> {
 	use sp_runtime::generic::OpaqueDigestItemId;
 
 	let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
@@ -599,7 +610,8 @@ pub(crate) fn find_forced_change<H: HeaderT>(
 pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>) {
 	initialize_bridge::<T, I>(InitializationData {
 		header: Box::new(header),
-		authority_list: sp_std::vec::Vec::new(), // we don't verify any proofs in external benchmarks
+		authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external
+		                                          * benchmarks */
 		set_id: 0,
 		is_halted: false,
 	});
@@ -608,14 +620,15 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{run_test, test_header, Origin, TestHash, TestHeader, TestNumber, TestRuntime};
+	use crate::mock::{
+		run_test, test_header, Origin, TestHash, TestHeader, TestNumber, TestRuntime,
+	};
 	use bp_test_utils::{
-		authority_list, make_default_justification, make_justification_for_header, JustificationGeneratorParams, ALICE,
-		BOB,
+		authority_list, make_default_justification, make_justification_for_header,
+		JustificationGeneratorParams, ALICE, BOB,
 	};
 	use codec::Encode;
-	use frame_support::weights::PostDispatchInfo;
-	use frame_support::{assert_err, assert_noop, assert_ok};
+	use frame_support::{assert_err, assert_noop, assert_ok, weights::PostDispatchInfo};
 	use sp_runtime::{Digest, DigestItem, DispatchError};
 
 	fn initialize_substrate_bridge() {
@@ -624,7 +637,10 @@ mod tests {
 
 	fn init_with_origin(
 		origin: Origin,
-	) -> Result<InitializationData<TestHeader>, sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>> {
+	) -> Result<
+		InitializationData<TestHeader>,
+		sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>,
+	> {
 		let genesis = test_header(0);
 
 		let init_data = InitializationData {
@@ -641,7 +657,11 @@ mod tests {
 		let header = test_header(header.into());
 		let justification = make_default_justification(&header);
 
-		Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), justification)
+		Pallet::<TestRuntime>::submit_finality_proof(
+			Origin::signed(1),
+			Box::new(header),
+			justification,
+		)
 	}
 
 	fn next_block() {
@@ -653,10 +673,11 @@ mod tests {
 	}
 
 	fn change_log(delay: u64) -> Digest<TestHash> {
-		let consensus_log = ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
-			next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
-			delay,
-		});
+		let consensus_log =
+			ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
+				next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
+				delay,
+			});
 
 		Digest::<TestHash> {
 			logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())],
@@ -821,14 +842,16 @@ mod tests {
 
 			let header = test_header(1);
 
-			let params = JustificationGeneratorParams::<TestHeader> {
-				set_id: 2,
-				..Default::default()
-			};
+			let params =
+				JustificationGeneratorParams::<TestHeader> { set_id: 2, ..Default::default() };
 			let justification = make_justification_for_header(params);
 
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), justification,),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification,
+				),
 				<Error<TestRuntime>>::InvalidJustification
 			);
 		})
@@ -844,7 +867,11 @@ mod tests {
 			justification.round = 42;
 
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), justification,),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification,
+				),
 				<Error<TestRuntime>>::InvalidJustification
 			);
 		})
@@ -869,7 +896,11 @@ mod tests {
 			let justification = make_default_justification(&header);
 
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), justification,),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification,
+				),
 				<Error<TestRuntime>>::InvalidAuthoritySet
 			);
 		})
@@ -942,7 +973,11 @@ mod tests {
 
 			// Should not be allowed to import this header
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), justification),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification
+				),
 				<Error<TestRuntime>>::UnsupportedScheduledChange
 			);
 		})
@@ -963,7 +998,11 @@ mod tests {
 
 			// Should not be allowed to import this header
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), justification),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification
+				),
 				<Error<TestRuntime>>::UnsupportedScheduledChange
 			);
 		})
@@ -1021,7 +1060,11 @@ mod tests {
 				let mut invalid_justification = make_default_justification(&header);
 				invalid_justification.round = 42;
 
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), Box::new(header), invalid_justification)
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					invalid_justification,
+				)
 			};
 
 			initialize_substrate_bridge();
diff --git a/bridges/modules/messages/src/benchmarking.rs b/bridges/modules/messages/src/benchmarking.rs
index d6ec00324493add34aefbb6711ab00ce4c33ef96..788ccc070310ea288f264b082798209fdbb5da3c 100644
--- a/bridges/modules/messages/src/benchmarking.rs
+++ b/bridges/modules/messages/src/benchmarking.rs
@@ -16,15 +16,15 @@
 
 //! Messages pallet benchmarking.
 
-use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH;
 use crate::{
-	inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, outbound_lane::ReceivalConfirmationResult,
-	Call,
+	inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane,
+	outbound_lane::ReceivalConfirmationResult, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, Call,
 };
 
 use bp_messages::{
-	source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, InboundLaneData, LaneId,
-	MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState,
+	source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages,
+	InboundLaneData, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer,
+	UnrewardedRelayersState,
 };
 use bp_runtime::messages::DispatchFeePayment;
 use frame_benchmarking::{account, benchmarks_instance_pallet};
@@ -50,11 +50,11 @@ pub enum ProofSize {
 	/// The proof is expected to be minimal. If value size may be changed, then it is expected to
 	/// have given size.
 	Minimal(u32),
-	/// The proof is expected to have at least given size and grow by increasing number of trie nodes
-	/// included in the proof.
+	/// The proof is expected to have at least given size and grow by increasing number of trie
+	/// nodes included in the proof.
 	HasExtraNodes(u32),
-	/// The proof is expected to have at least given size and grow by increasing value that is stored
-	/// in the trie.
+	/// The proof is expected to have at least given size and grow by increasing value that is
+	/// stored in the trie.
 	HasLargeLeaf(u32),
 }
 
@@ -900,18 +900,12 @@ benchmarks_instance_pallet! {
 
 fn send_regular_message<T: Config<I>, I: 'static>() {
 	let mut outbound_lane = outbound_lane::<T, I>(T::bench_lane_id());
-	outbound_lane.send_message(MessageData {
-		payload: vec![],
-		fee: MESSAGE_FEE.into(),
-	});
+	outbound_lane.send_message(MessageData { payload: vec![], fee: MESSAGE_FEE.into() });
 }
 
 fn send_regular_message_with_payload<T: Config<I>, I: 'static>(payload: Vec<u8>) {
 	let mut outbound_lane = outbound_lane::<T, I>(T::bench_lane_id());
-	outbound_lane.send_message(MessageData {
-		payload,
-		fee: MESSAGE_FEE.into(),
-	});
+	outbound_lane.send_message(MessageData { payload, fee: MESSAGE_FEE.into() });
 }
 
 fn confirm_message_delivery<T: Config<I>, I: 'static>(nonce: MessageNonce) {
@@ -943,7 +937,10 @@ fn receive_messages<T: Config<I>, I: 'static>(nonce: MessageNonce) {
 	});
 }
 
-fn ensure_relayer_rewarded<T: Config<I>, I: 'static>(relayer_id: &T::AccountId, old_balance: &T::OutboundMessageFee) {
+fn ensure_relayer_rewarded<T: Config<I>, I: 'static>(
+	relayer_id: &T::AccountId,
+	old_balance: &T::OutboundMessageFee,
+) {
 	let new_balance = T::account_balance(relayer_id);
 	assert!(
 		new_balance > *old_balance,
diff --git a/bridges/modules/messages/src/inbound_lane.rs b/bridges/modules/messages/src/inbound_lane.rs
index 645174b6d750f81271c18690b722bcc9c38c1c46..00875bb878a823beda55a136ab96910ea8eceaaf 100644
--- a/bridges/modules/messages/src/inbound_lane.rs
+++ b/bridges/modules/messages/src/inbound_lane.rs
@@ -18,7 +18,8 @@
 
 use bp_messages::{
 	target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch},
-	DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, UnrewardedRelayer,
+	DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData,
+	UnrewardedRelayer,
 };
 use bp_runtime::messages::MessageDispatchResult;
 use frame_support::RuntimeDebug;
@@ -71,16 +72,19 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 	}
 
 	/// Receive state of the corresponding outbound lane.
-	pub fn receive_state_update(&mut self, outbound_lane_data: OutboundLaneData) -> Option<MessageNonce> {
+	pub fn receive_state_update(
+		&mut self,
+		outbound_lane_data: OutboundLaneData,
+	) -> Option<MessageNonce> {
 		let mut data = self.storage.data();
 		let last_delivered_nonce = data.last_delivered_nonce();
 
 		if outbound_lane_data.latest_received_nonce > last_delivered_nonce {
 			// this is something that should never happen if proofs are correct
-			return None;
+			return None
 		}
 		if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce {
-			return None;
+			return None
 		}
 
 		let new_confirmed_nonce = outbound_lane_data.latest_received_nonce;
@@ -95,7 +99,8 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 			data.relayers.pop_front();
 		}
 		// Secondly, update the next record with lower nonce equal to new confirmed nonce if needed.
-		// Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap.
+		// Note: There will be max. 1 record to update as we don't allow messages from relayers to
+		// overlap.
 		match data.relayers.front_mut() {
 			Some(entry) if entry.messages.begin < new_confirmed_nonce => {
 				entry.messages.dispatch_results = entry
@@ -103,8 +108,8 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 					.dispatch_results
 					.split_off((new_confirmed_nonce + 1 - entry.messages.begin) as _);
 				entry.messages.begin = new_confirmed_nonce + 1;
-			}
-			_ => {}
+			},
+			_ => {},
 		}
 
 		self.storage.set_data(data);
@@ -122,28 +127,25 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 		let mut data = self.storage.data();
 		let is_correct_message = nonce == data.last_delivered_nonce() + 1;
 		if !is_correct_message {
-			return ReceivalResult::InvalidNonce;
+			return ReceivalResult::InvalidNonce
 		}
 
 		// if there are more unrewarded relayer entries than we may accept, reject this message
 		if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() {
-			return ReceivalResult::TooManyUnrewardedRelayers;
+			return ReceivalResult::TooManyUnrewardedRelayers
 		}
 
 		// if there are more unconfirmed messages than we may accept, reject this message
 		let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce);
 		if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() {
-			return ReceivalResult::TooManyUnconfirmedMessages;
+			return ReceivalResult::TooManyUnconfirmedMessages
 		}
 
 		// then, dispatch message
 		let dispatch_result = P::dispatch(
 			relayer_at_this_chain,
 			DispatchMessage {
-				key: MessageKey {
-					lane_id: self.storage.id(),
-					nonce,
-				},
+				key: MessageKey { lane_id: self.storage.id(), nonce },
 				data: message_data,
 			},
 		);
@@ -153,7 +155,7 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 			Some(entry) if entry.relayer == *relayer_at_bridged_chain => {
 				entry.messages.note_dispatched_message(dispatch_result.dispatch_result);
 				false
-			}
+			},
 			_ => true,
 		};
 		if push_new {
@@ -174,8 +176,9 @@ mod tests {
 	use crate::{
 		inbound_lane,
 		mock::{
-			dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, TestRuntime,
-			REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C,
+			dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch,
+			TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B,
+			TEST_RELAYER_C,
 		},
 		RuntimeInboundLaneStorage,
 	};
@@ -284,16 +287,10 @@ mod tests {
 			let mut seed_storage_data = lane.storage.data();
 			// Prepare data
 			seed_storage_data.last_confirmed_nonce = 0;
-			seed_storage_data
-				.relayers
-				.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A));
+			seed_storage_data.relayers.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A));
 			// Simulate messages batch (2, 3, 4) from relayer #2
-			seed_storage_data
-				.relayers
-				.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B));
-			seed_storage_data
-				.relayers
-				.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C));
+			seed_storage_data.relayers.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B));
+			seed_storage_data.relayers.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C));
 			lane.storage.set_data(seed_storage_data);
 			// Check
 			assert_eq!(
@@ -335,7 +332,8 @@ mod tests {
 	fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() {
 		run_test(|| {
 			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
-			let max_nonce = <TestRuntime as crate::Config>::MaxUnrewardedRelayerEntriesAtInboundLane::get();
+			let max_nonce =
+				<TestRuntime as crate::Config>::MaxUnrewardedRelayerEntriesAtInboundLane::get();
 			for current_nonce in 1..max_nonce + 1 {
 				assert_eq!(
 					lane.receive_message::<TestMessageDispatch, _>(
@@ -374,7 +372,8 @@ mod tests {
 	fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() {
 		run_test(|| {
 			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
-			let max_nonce = <TestRuntime as crate::Config>::MaxUnconfirmedMessagesAtInboundLane::get();
+			let max_nonce =
+				<TestRuntime as crate::Config>::MaxUnconfirmedMessagesAtInboundLane::get();
 			for current_nonce in 1..=max_nonce {
 				assert_eq!(
 					lane.receive_message::<TestMessageDispatch, _>(
diff --git a/bridges/modules/messages/src/instant_payments.rs b/bridges/modules/messages/src/instant_payments.rs
index 1c67c0446a3ba082b7f258bf34e5ad19d1a61867..4933901c26708ab998db34cf6bd1fb9f26ff77b0 100644
--- a/bridges/modules/messages/src/instant_payments.rs
+++ b/bridges/modules/messages/src/instant_payments.rs
@@ -46,7 +46,8 @@ pub struct InstantCurrencyPayments<T, Currency, GetConfirmationFee, RootAccount>
 	_phantom: sp_std::marker::PhantomData<(T, Currency, GetConfirmationFee, RootAccount)>,
 }
 
-impl<T, Currency, GetConfirmationFee, RootAccount> MessageDeliveryAndDispatchPayment<T::AccountId, Currency::Balance>
+impl<T, Currency, GetConfirmationFee, RootAccount>
+	MessageDeliveryAndDispatchPayment<T::AccountId, Currency::Balance>
 	for InstantCurrencyPayments<T, Currency, GetConfirmationFee, RootAccount>
 where
 	T: frame_system::Config,
@@ -118,26 +119,31 @@ fn pay_relayers_rewards<Currency, AccountId>(
 			// If delivery confirmation is submitted by other relayer, let's deduct confirmation fee
 			// from relayer reward.
 			//
-			// If confirmation fee has been increased (or if it was the only component of message fee),
-			// then messages relayer may receive zero reward.
+			// If confirmation fee has been increased (or if it was the only component of message
+			// fee), then messages relayer may receive zero reward.
 			let mut confirmation_reward = confirmation_fee.saturating_mul(reward.messages.into());
 			if confirmation_reward > relayer_reward {
 				confirmation_reward = relayer_reward;
 			}
 			relayer_reward = relayer_reward.saturating_sub(confirmation_reward);
-			confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(confirmation_reward);
+			confirmation_relayer_reward =
+				confirmation_relayer_reward.saturating_add(confirmation_reward);
 		} else {
 			// If delivery confirmation is submitted by this relayer, let's add confirmation fee
 			// from other relayers to this relayer reward.
 			confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(reward.reward);
-			continue;
+			continue
 		}
 
 		pay_relayer_reward::<Currency, _>(relayer_fund_account, &relayer, relayer_reward);
 	}
 
 	// finally - pay reward to confirmation relayer
-	pay_relayer_reward::<Currency, _>(relayer_fund_account, confirmation_relayer, confirmation_relayer_reward);
+	pay_relayer_reward::<Currency, _>(
+		relayer_fund_account,
+		confirmation_relayer,
+		confirmation_relayer_reward,
+	);
 }
 
 /// Transfer funds from relayers fund account to given relayer.
@@ -150,7 +156,7 @@ fn pay_relayer_reward<Currency, AccountId>(
 	Currency: CurrencyT<AccountId>,
 {
 	if reward.is_zero() {
-		return;
+		return
 	}
 
 	let pay_result = Currency::transfer(
@@ -193,20 +199,8 @@ mod tests {
 
 	fn relayers_rewards() -> RelayersRewards<TestAccountId, TestBalance> {
 		vec![
-			(
-				RELAYER_1,
-				RelayerRewards {
-					reward: 100,
-					messages: 2,
-				},
-			),
-			(
-				RELAYER_2,
-				RelayerRewards {
-					reward: 100,
-					messages: 3,
-				},
-			),
+			(RELAYER_1, RelayerRewards { reward: 100, messages: 2 }),
+			(RELAYER_2, RelayerRewards { reward: 100, messages: 3 }),
 		]
 		.into_iter()
 		.collect()
@@ -215,7 +209,12 @@ mod tests {
 	#[test]
 	fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() {
 		run_test(|| {
-			pay_relayers_rewards::<Balances, _>(&RELAYER_2, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10);
+			pay_relayers_rewards::<Balances, _>(
+				&RELAYER_2,
+				relayers_rewards(),
+				&RELAYERS_FUND_ACCOUNT,
+				10,
+			);
 
 			assert_eq!(Balances::free_balance(&RELAYER_1), 80);
 			assert_eq!(Balances::free_balance(&RELAYER_2), 120);
@@ -225,7 +224,12 @@ mod tests {
 	#[test]
 	fn confirmation_relayer_is_rewarded_if_it_has_not_delivered_any_delivered_messages() {
 		run_test(|| {
-			pay_relayers_rewards::<Balances, _>(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10);
+			pay_relayers_rewards::<Balances, _>(
+				&RELAYER_3,
+				relayers_rewards(),
+				&RELAYERS_FUND_ACCOUNT,
+				10,
+			);
 
 			assert_eq!(Balances::free_balance(&RELAYER_1), 80);
 			assert_eq!(Balances::free_balance(&RELAYER_2), 70);
@@ -236,7 +240,12 @@ mod tests {
 	#[test]
 	fn only_confirmation_relayer_is_rewarded_if_confirmation_fee_has_significantly_increased() {
 		run_test(|| {
-			pay_relayers_rewards::<Balances, _>(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 1000);
+			pay_relayers_rewards::<Balances, _>(
+				&RELAYER_3,
+				relayers_rewards(),
+				&RELAYERS_FUND_ACCOUNT,
+				1000,
+			);
 
 			assert_eq!(Balances::free_balance(&RELAYER_1), 0);
 			assert_eq!(Balances::free_balance(&RELAYER_2), 0);
diff --git a/bridges/modules/messages/src/lib.rs b/bridges/modules/messages/src/lib.rs
index 8e6f0969b548505aa9d97382cc831473ca799164..c5ce27bb7258a5554faaa2be91a7a21d7d50546b 100644
--- a/bridges/modules/messages/src/lib.rs
+++ b/bridges/modules/messages/src/lib.rs
@@ -38,22 +38,27 @@
 #![allow(clippy::unused_unit)]
 
 pub use crate::weights_ext::{
-	ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt,
-	EXPECTED_DEFAULT_MESSAGE_LENGTH,
+	ensure_able_to_receive_confirmation, ensure_able_to_receive_message,
+	ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH,
 };
 
-use crate::inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult};
-use crate::outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult};
-use crate::weights::WeightInfo;
+use crate::{
+	inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult},
+	outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult},
+	weights::WeightInfo,
+};
 
 use bp_messages::{
 	source_chain::{
-		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, OnMessageAccepted,
-		RelayersRewards, TargetHeaderChain,
+		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed,
+		OnMessageAccepted, RelayersRewards, TargetHeaderChain,
+	},
+	target_chain::{
+		DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain,
 	},
-	target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain},
-	total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce,
-	OperatingMode, OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState,
+	total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey,
+	MessageNonce, OperatingMode, OutboundLaneData, Parameter as MessagesParameter,
+	UnrewardedRelayersState,
 };
 use bp_runtime::{ChainId, Size};
 use codec::{Decode, Encode};
@@ -112,8 +117,8 @@ pub mod pallet {
 		/// be ready to pay for its maintenance.
 		type MaxMessagesToPruneAtOnce: Get<MessageNonce>;
 		/// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the
-		/// relayer has delivered messages, but either confirmations haven't been delivered back to the
-		/// source chain, or we haven't received reward confirmations yet.
+		/// relayer has delivered messages, but either confirmations haven't been delivered back to
+		/// the source chain, or we haven't received reward confirmations yet.
 		///
 		/// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep
 		/// in mind that the same relayer account may take several (non-consecutive) entries in this
@@ -126,12 +131,13 @@ pub mod pallet {
 		/// This constant limits difference between last message from last entry of the
 		/// `InboundLaneData::relayers` and first message at the first entry.
 		///
-		/// There is no point of making this parameter lesser than MaxUnrewardedRelayerEntriesAtInboundLane,
-		/// because then maximal number of relayer entries will be limited by maximal number of messages.
+		/// There is no point in making this parameter less than
+		/// `MaxUnrewardedRelayerEntriesAtInboundLane`, because then the maximal number of relayer
+		/// entries will be limited by the maximal number of messages.
 		///
-		/// This value also represents maximal number of messages in single delivery transaction. Transaction
-		/// that is declaring more messages than this value, will be rejected. Even if these messages are
-		/// from different lanes.
+		/// This value also represents the maximal number of messages in a single delivery
+		/// transaction. A transaction that declares more messages than this value will be
+		/// rejected, even if these messages are from different lanes.
 		type MaxUnconfirmedMessagesAtInboundLane: Get<MessageNonce>;
 
 		/// Payload type of outbound messages. This payload is dispatched on the bridged chain.
@@ -143,7 +149,8 @@ pub mod pallet {
 		type InboundPayload: Decode;
 		/// Message fee type of inbound messages. This fee is paid on the bridged chain.
 		type InboundMessageFee: Decode;
-		/// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the bridged chain.
+		/// Identifier of the relayer that delivers messages to this chain. Relayer reward is paid
+		/// on the bridged chain.
 		type InboundRelayer: Parameter;
 
 		/// A type which can be turned into an AccountId from a 256-bit hash.
@@ -156,7 +163,11 @@ pub mod pallet {
 		/// Target header chain.
 		type TargetHeaderChain: TargetHeaderChain<Self::OutboundPayload, Self::AccountId>;
 		/// Message payload verifier.
-		type LaneMessageVerifier: LaneMessageVerifier<Self::AccountId, Self::OutboundPayload, Self::OutboundMessageFee>;
+		type LaneMessageVerifier: LaneMessageVerifier<
+			Self::AccountId,
+			Self::OutboundPayload,
+			Self::OutboundMessageFee,
+		>;
 		/// Message delivery payment.
 		type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment<
 			Self::AccountId,
@@ -180,13 +191,15 @@ pub mod pallet {
 	}
 
 	/// Shortcut to messages proof type for Config.
-	type MessagesProofOf<T, I> =
-		<<T as Config<I>>::SourceHeaderChain as SourceHeaderChain<<T as Config<I>>::InboundMessageFee>>::MessagesProof;
+	type MessagesProofOf<T, I> = <<T as Config<I>>::SourceHeaderChain as SourceHeaderChain<
+		<T as Config<I>>::InboundMessageFee,
+	>>::MessagesProof;
 	/// Shortcut to messages delivery proof type for Config.
-	type MessagesDeliveryProofOf<T, I> = <<T as Config<I>>::TargetHeaderChain as TargetHeaderChain<
-		<T as Config<I>>::OutboundPayload,
-		<T as frame_system::Config>::AccountId,
-	>>::MessagesDeliveryProof;
+	type MessagesDeliveryProofOf<T, I> =
+		<<T as Config<I>>::TargetHeaderChain as TargetHeaderChain<
+			<T as Config<I>>::OutboundPayload,
+			<T as frame_system::Config>::AccountId,
+		>>::MessagesDeliveryProof;
 
 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
@@ -204,11 +217,11 @@ pub mod pallet {
 				Some(new_owner) => {
 					PalletOwner::<T, I>::put(&new_owner);
 					log::info!(target: "runtime::bridge-messages", "Setting pallet Owner to: {:?}", new_owner);
-				}
+				},
 				None => {
 					PalletOwner::<T, I>::kill();
 					log::info!(target: "runtime::bridge-messages", "Removed Owner of pallet.");
-				}
+				},
 			}
 			Ok(())
 		}
@@ -217,7 +230,10 @@ pub mod pallet {
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
-		pub fn set_operating_mode(origin: OriginFor<T>, operating_mode: OperatingMode) -> DispatchResult {
+		pub fn set_operating_mode(
+			origin: OriginFor<T>,
+			operating_mode: OperatingMode,
+		) -> DispatchResult {
 			ensure_owner_or_root::<T, I>(origin)?;
 			PalletOperatingMode::<T, I>::put(operating_mode);
 			log::info!(
@@ -232,9 +248,13 @@ pub mod pallet {
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		///
-		/// The weight is: single read for permissions check + 2 writes for parameter value and event.
+		/// The weight is: single read for permissions check + 2 writes for parameter value and
+		/// event.
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational))]
-		pub fn update_pallet_parameter(origin: OriginFor<T>, parameter: T::Parameter) -> DispatchResult {
+		pub fn update_pallet_parameter(
+			origin: OriginFor<T>,
+			parameter: T::Parameter,
+		) -> DispatchResult {
 			ensure_owner_or_root::<T, I>(origin)?;
 			parameter.save();
 			Self::deposit_event(Event::ParameterUpdated(parameter));
@@ -310,9 +330,9 @@ pub mod pallet {
 				// saturating_add is fine here - overflow here means that someone controls all
 				// chain funds, which shouldn't ever happen + `pay_delivery_and_dispatch_fee`
 				// above will fail before we reach here
-				let message_data = message_data
-					.as_mut()
-					.expect("the message is sent and not yet delivered; so it is in the storage; qed");
+				let message_data = message_data.as_mut().expect(
+					"the message is sent and not yet delivered; so it is in the storage; qed",
+				);
 				message_data.fee = message_data.fee.saturating_add(&additional_fee);
 				message_data.payload.len()
 			});
@@ -323,10 +343,7 @@ pub mod pallet {
 				T::WeightInfo::increase_message_fee(message_size as _),
 			);
 
-			Ok(PostDispatchInfo {
-				actual_weight: Some(actual_weight),
-				pays_fee: Pays::Yes,
-			})
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
 
 		/// Receive messages proof from bridged chain.
@@ -354,14 +371,18 @@ pub mod pallet {
 			// why do we need to know the weight of this (`receive_messages_proof`) call? Because
 			// we may want to return some funds for not-dispatching (or partially dispatching) some
 			// messages to the call origin (relayer). And this is done by returning actual weight
-			// from the call. But we only know dispatch weight of every messages. So to refund relayer
-			// because we have not dispatched Message, we need to:
+			// from the call. But we only know the dispatch weight of every message. So to refund
+			// the relayer because we have not dispatched a Message, we need to:
 			//
 			// ActualWeight = DeclaredWeight - Message.DispatchWeight
 			//
 			// The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible
 			// to get pre-computed value (and it has been already computed by the executive).
-			let declared_weight = T::WeightInfo::receive_messages_proof_weight(&proof, messages_count, dispatch_weight);
+			let declared_weight = T::WeightInfo::receive_messages_proof_weight(
+				&proof,
+				messages_count,
+				dispatch_weight,
+			);
 			let mut actual_weight = declared_weight;
 
 			// verify messages proof && convert proof into messages
@@ -402,9 +423,9 @@ pub mod pallet {
 				for message in lane_data.messages {
 					debug_assert_eq!(message.key.lane_id, lane_id);
 
-					// ensure that relayer has declared enough weight for dispatching next message on
-					// this lane. We can't dispatch lane messages out-of-order, so if declared weight
-					// is not enough, let's move to next lane
+					// ensure that relayer has declared enough weight for dispatching next message
+					// on this lane. We can't dispatch lane messages out-of-order, so if declared
+					// weight is not enough, let's move to next lane
 					let dispatch_weight = T::MessageDispatch::dispatch_weight(&message);
 					if dispatch_weight > dispatch_weight_left {
 						log::trace!(
@@ -414,7 +435,7 @@ pub mod pallet {
 							dispatch_weight,
 							dispatch_weight_left,
 						);
-						break;
+						break
 					}
 					total_messages += 1;
 
@@ -438,10 +459,10 @@ pub mod pallet {
 								dispatch_result.unspent_weight,
 								!dispatch_result.dispatch_fee_paid_during_dispatch,
 							)
-						}
-						ReceivalResult::InvalidNonce
-						| ReceivalResult::TooManyUnrewardedRelayers
-						| ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true),
+						},
+						ReceivalResult::InvalidNonce |
+						ReceivalResult::TooManyUnrewardedRelayers |
+						ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true),
 					};
 
 					let unspent_weight = sp_std::cmp::min(unspent_weight, dispatch_weight);
@@ -468,10 +489,7 @@ pub mod pallet {
 				declared_weight,
 			);
 
-			Ok(PostDispatchInfo {
-				actual_weight: Some(actual_weight),
-				pays_fee: Pays::Yes,
-			})
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
 
 		/// Receive messages delivery proof from bridged chain.
@@ -487,74 +505,87 @@ pub mod pallet {
 		) -> DispatchResultWithPostInfo {
 			ensure_not_halted::<T, I>()?;
 
-			// why do we need to know the weight of this (`receive_messages_delivery_proof`) call? Because
-			// we may want to return some funds for messages that are not processed by the delivery callback,
-			// or if their actual processing weight is less than accounted by weight formula.
-			// So to refund relayer, we need to:
+			// why do we need to know the weight of this (`receive_messages_delivery_proof`) call?
+			// Because we may want to return some funds for messages that are not processed by the
+			// delivery callback, or if their actual processing weight is less than accounted by
+			// weight formula. So to refund relayer, we need to:
 			//
 			// ActualWeight = DeclaredWeight - UnspentCallbackWeight
 			//
 			// The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible
 			// to get pre-computed value (and it has been already computed by the executive).
-			let single_message_callback_overhead = T::WeightInfo::single_message_callback_overhead(T::DbWeight::get());
-			let declared_weight =
-				T::WeightInfo::receive_messages_delivery_proof_weight(&proof, &relayers_state, T::DbWeight::get());
+			let single_message_callback_overhead =
+				T::WeightInfo::single_message_callback_overhead(T::DbWeight::get());
+			let declared_weight = T::WeightInfo::receive_messages_delivery_proof_weight(
+				&proof,
+				&relayers_state,
+				T::DbWeight::get(),
+			);
 			let mut actual_weight = declared_weight;
 
 			let confirmation_relayer = ensure_signed(origin)?;
-			let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| {
-				log::trace!(
-					target: "runtime::bridge-messages",
-					"Rejecting invalid messages delivery proof: {:?}",
-					err,
-				);
+			let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof)
+				.map_err(|err| {
+					log::trace!(
+						target: "runtime::bridge-messages",
+						"Rejecting invalid messages delivery proof: {:?}",
+						err,
+					);
 
-				Error::<T, I>::InvalidMessagesDeliveryProof
-			})?;
+					Error::<T, I>::InvalidMessagesDeliveryProof
+				})?;
 
 			// verify that the relayer has declared correct `lane_data::relayers` state
-			// (we only care about total number of entries and messages, because this affects call weight)
+			// (we only care about total number of entries and messages, because this affects call
+			// weight)
 			ensure!(
-				total_unrewarded_messages(&lane_data.relayers).unwrap_or(MessageNonce::MAX)
-					== relayers_state.total_messages
-					&& lane_data.relayers.len() as MessageNonce == relayers_state.unrewarded_relayer_entries,
+				total_unrewarded_messages(&lane_data.relayers).unwrap_or(MessageNonce::MAX) ==
+					relayers_state.total_messages &&
+					lane_data.relayers.len() as MessageNonce ==
+						relayers_state.unrewarded_relayer_entries,
 				Error::<T, I>::InvalidUnrewardedRelayersState
 			);
 
 			// mark messages as delivered
 			let mut lane = outbound_lane::<T, I>(lane_id);
-			let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new();
+			let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> =
+				RelayersRewards::new();
 			let last_delivered_nonce = lane_data.last_delivered_nonce();
-			let confirmed_messages =
-				match lane.confirm_delivery(relayers_state.total_messages, last_delivered_nonce, &lane_data.relayers) {
-					ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => Some(confirmed_messages),
-					ReceivalConfirmationResult::NoNewConfirmations => None,
-					ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(to_confirm_messages_count) => {
-						log::trace!(
-							target: "runtime::bridge-messages",
-							"Messages delivery proof contains too many messages to confirm: {} vs declared {}",
-							to_confirm_messages_count,
-							relayers_state.total_messages,
-						);
+			let confirmed_messages = match lane.confirm_delivery(
+				relayers_state.total_messages,
+				last_delivered_nonce,
+				&lane_data.relayers,
+			) {
+				ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) =>
+					Some(confirmed_messages),
+				ReceivalConfirmationResult::NoNewConfirmations => None,
+				ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(
+					to_confirm_messages_count,
+				) => {
+					log::trace!(
+						target: "runtime::bridge-messages",
+						"Messages delivery proof contains too many messages to confirm: {} vs declared {}",
+						to_confirm_messages_count,
+						relayers_state.total_messages,
+					);
 
-						fail!(Error::<T, I>::TryingToConfirmMoreMessagesThanExpected);
-					}
-					error => {
-						log::trace!(
-							target: "runtime::bridge-messages",
-							"Messages delivery proof contains invalid unrewarded relayers vec: {:?}",
-							error,
-						);
+					fail!(Error::<T, I>::TryingToConfirmMoreMessagesThanExpected);
+				},
+				error => {
+					log::trace!(
+						target: "runtime::bridge-messages",
+						"Messages delivery proof contains invalid unrewarded relayers vec: {:?}",
+						error,
+					);
 
-						fail!(Error::<T, I>::InvalidUnrewardedRelayers);
-					}
-				};
+					fail!(Error::<T, I>::InvalidUnrewardedRelayers);
+				},
+			};
 
 			if let Some(confirmed_messages) = confirmed_messages {
 				// handle messages delivery confirmation
-				let preliminary_callback_overhead = relayers_state
-					.total_messages
-					.saturating_mul(single_message_callback_overhead);
+				let preliminary_callback_overhead =
+					relayers_state.total_messages.saturating_mul(single_message_callback_overhead);
 				let actual_callback_weight =
 					T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages);
 				match preliminary_callback_overhead.checked_sub(actual_callback_weight) {
@@ -569,9 +600,12 @@ pub mod pallet {
 							difference,
 						);
 						actual_weight = actual_weight.saturating_sub(difference);
-					}
+					},
 					None => {
-						debug_assert!(false, "T::OnDeliveryConfirmed callback consumed too much weight.");
+						debug_assert!(
+							false,
+							"T::OnDeliveryConfirmed callback consumed too much weight."
+						);
 						log::error!(
 							target: "runtime::bridge-messages",
 							"T::OnDeliveryConfirmed callback has spent more weight that it is allowed to: \
@@ -579,7 +613,7 @@ pub mod pallet {
 							preliminary_callback_overhead,
 							actual_callback_weight,
 						);
-					}
+					},
 				}
 
 				// emit 'delivered' event
@@ -587,18 +621,27 @@ pub mod pallet {
 				Self::deposit_event(Event::MessagesDelivered(lane_id, confirmed_messages));
 
 				// remember to reward relayers that have delivered messages
-				// this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain
+				// this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the
+				// bridged chain
 				for entry in lane_data.relayers {
-					let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start());
+					let nonce_begin =
+						sp_std::cmp::max(entry.messages.begin, *received_range.start());
 					let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end());
 
 					// loop won't proceed if current entry is ahead of received range (begin > end).
-					// this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain
+					// this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged
+					// chain
 					let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default();
 					for nonce in nonce_begin..nonce_end + 1 {
-						let message_data = OutboundMessages::<T, I>::get(MessageKey { lane_id, nonce })
-							.expect("message was just confirmed; we never prune unconfirmed messages; qed");
-						relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee);
+						let message_data = OutboundMessages::<T, I>::get(MessageKey {
+							lane_id,
+							nonce,
+						})
+						.expect(
+							"message was just confirmed; we never prune unconfirmed messages; qed",
+						);
+						relayer_reward.reward =
+							relayer_reward.reward.saturating_add(&message_data.fee);
 						relayer_reward.messages += 1;
 					}
 				}
@@ -606,7 +649,8 @@ pub mod pallet {
 
 			// if some new messages have been confirmed, reward relayers
 			if !relayers_rewards.is_empty() {
-				let relayer_fund_account = relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
+				let relayer_fund_account =
+					relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 				<T as Config<I>>::MessageDeliveryAndDispatchPayment::pay_relayers_rewards(
 					&confirmation_relayer,
 					relayers_rewards,
@@ -621,10 +665,7 @@ pub mod pallet {
 				lane_id,
 			);
 
-			Ok(PostDispatchInfo {
-				actual_weight: Some(actual_weight),
-				pays_fee: Pays::Yes,
-			})
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
 	}
 
@@ -658,14 +699,15 @@ pub mod pallet {
 		InvalidMessagesDeliveryProof,
 		/// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane).
 		InvalidUnrewardedRelayers,
-		/// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call.
+		/// The relayer has declared invalid unrewarded relayers state in the
+		/// `receive_messages_delivery_proof` call.
 		InvalidUnrewardedRelayersState,
 		/// The message someone is trying to work with (i.e. increase fee) is already-delivered.
 		MessageIsAlreadyDelivered,
 		/// The message someone is trying to work with (i.e. increase fee) is not yet sent.
 		MessageIsNotYetSent,
-		/// The number of actually confirmed messages is going to be larger than the number of messages in the proof.
-		/// This may mean that this or bridged chain storage is corrupted.
+		/// The number of actually confirmed messages is going to be larger than the number of
+		/// messages in the proof. This may mean that this or bridged chain storage is corrupted.
 		TryingToConfirmMoreMessagesThanExpected,
 	}
 
@@ -684,7 +726,8 @@ pub mod pallet {
 	/// Depending on the mode either all, some, or no transactions will be allowed.
 	#[pallet::storage]
 	#[pallet::getter(fn operating_mode)]
-	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> = StorageValue<_, OperatingMode, ValueQuery>;
+	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, OperatingMode, ValueQuery>;
 
 	/// Map of lane id => inbound lane data.
 	#[pallet::storage]
@@ -734,7 +777,10 @@ pub mod pallet {
 
 	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Get stored data of the outbound message with given nonce.
-		pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option<MessageData<T::OutboundMessageFee>> {
+		pub fn outbound_message_data(
+			lane: LaneId,
+			nonce: MessageNonce,
+		) -> Option<MessageData<T::OutboundMessageFee>> {
 			OutboundMessages::<T, I>::get(MessageKey { lane_id: lane, nonce })
 		}
 
@@ -759,7 +805,9 @@ pub mod pallet {
 		}
 
 		/// Get state of unrewarded relayers set.
-		pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState {
+		pub fn inbound_unrewarded_relayers_state(
+			lane: bp_messages::LaneId,
+		) -> bp_messages::UnrewardedRelayersState {
 			let relayers = InboundLanes::<T, I>::get(&lane).relayers;
 			bp_messages::UnrewardedRelayersState {
 				unrewarded_relayer_entries: relayers.len() as _,
@@ -804,13 +852,18 @@ pub mod storage_keys {
 /// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending
 /// on the implementation it can be used to store relayers rewards.
 /// See [`InstantCurrencyPayments`] for a concrete implementation.
-pub fn relayer_fund_account_id<AccountId, AccountIdConverter: Convert<H256, AccountId>>() -> AccountId {
+pub fn relayer_fund_account_id<AccountId, AccountIdConverter: Convert<H256, AccountId>>(
+) -> AccountId {
 	let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID);
 	AccountIdConverter::convert(encoded_id)
 }
 
-impl<T, I> bp_messages::source_chain::MessagesBridge<T::AccountId, T::OutboundMessageFee, T::OutboundPayload>
-	for Pallet<T, I>
+impl<T, I>
+	bp_messages::source_chain::MessagesBridge<
+		T::AccountId,
+		T::OutboundMessageFee,
+		T::OutboundPayload,
+	> for Pallet<T, I>
 where
 	T: Config<I>,
 	I: 'static,
@@ -862,17 +915,23 @@ fn send_message<T: Config<I>, I: 'static>(
 
 	// now let's enforce any additional lane rules
 	let mut lane = outbound_lane::<T, I>(lane_id);
-	T::LaneMessageVerifier::verify_message(&submitter, &delivery_and_dispatch_fee, &lane_id, &lane.data(), &payload)
-		.map_err(|err| {
-			log::trace!(
-				target: "runtime::bridge-messages",
-				"Message to lane {:?} is rejected by lane verifier: {:?}",
-				lane_id,
-				err,
-			);
+	T::LaneMessageVerifier::verify_message(
+		&submitter,
+		&delivery_and_dispatch_fee,
+		&lane_id,
+		&lane.data(),
+		&payload,
+	)
+	.map_err(|err| {
+		log::trace!(
+			target: "runtime::bridge-messages",
+			"Message to lane {:?} is rejected by lane verifier: {:?}",
+			lane_id,
+			err,
+		);
 
-			Error::<T, I>::MessageRejectedByLaneVerifier
-		})?;
+		Error::<T, I>::MessageRejectedByLaneVerifier
+	})?;
 
 	// let's withdraw delivery and dispatch fee from submitter
 	T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee(
@@ -896,15 +955,14 @@ fn send_message<T: Config<I>, I: 'static>(
 	// finally, save message in outbound storage and emit event
 	let encoded_payload = payload.encode();
 	let encoded_payload_len = encoded_payload.len();
-	let nonce = lane.send_message(MessageData {
-		payload: encoded_payload,
-		fee: delivery_and_dispatch_fee,
-	});
+	let nonce =
+		lane.send_message(MessageData { payload: encoded_payload, fee: delivery_and_dispatch_fee });
 	// Guaranteed to be called outside only when the message is accepted.
-	// We assume that the maximum weight call back used is `single_message_callback_overhead`, so do not perform
-	// complex db operation in callback. If you want to, put these magic logic in outside pallet and control
-	// the weight there.
-	let single_message_callback_overhead = T::WeightInfo::single_message_callback_overhead(T::DbWeight::get());
+	// We assume that the maximum weight call back used is `single_message_callback_overhead`, so do
+	// not perform complex db operation in callback. If you want to, put these magic logic in
+	// outside pallet and control the weight there.
+	let single_message_callback_overhead =
+		T::WeightInfo::single_message_callback_overhead(T::DbWeight::get());
 	let actual_callback_weight = T::OnMessageAccepted::on_messages_accepted(&lane_id, &nonce);
 	match single_message_callback_overhead.checked_sub(actual_callback_weight) {
 		Some(difference) if difference == 0 => (),
@@ -918,7 +976,7 @@ fn send_message<T: Config<I>, I: 'static>(
 				difference,
 			);
 			actual_weight = actual_weight.saturating_sub(difference);
-		}
+		},
 		None => {
 			debug_assert!(false, "T::OnMessageAccepted callback consumed too much weight.");
 			log::error!(
@@ -928,7 +986,7 @@ fn send_message<T: Config<I>, I: 'static>(
 				single_message_callback_overhead,
 				actual_callback_weight,
 			);
-		}
+		},
 	}
 
 	// message sender pays for pruning at most `MaxMessagesToPruneAtOnce` messages
@@ -963,7 +1021,9 @@ fn send_message<T: Config<I>, I: 'static>(
 fn ensure_owner_or_root<T: Config<I>, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> {
 	match origin.into() {
 		Ok(RawOrigin::Root) => Ok(()),
-		Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Pallet::<T, I>::module_owner().as_ref() => Ok(()),
+		Ok(RawOrigin::Signed(ref signer))
+			if Some(signer) == Pallet::<T, I>::module_owner().as_ref() =>
+			Ok(()),
 		_ => Err(BadOrigin),
 	}
 }
@@ -987,12 +1047,16 @@ fn ensure_not_halted<T: Config<I>, I: 'static>() -> Result<(), Error<T, I>> {
 }
 
 /// Creates new inbound lane object, backed by runtime storage.
-fn inbound_lane<T: Config<I>, I: 'static>(lane_id: LaneId) -> InboundLane<RuntimeInboundLaneStorage<T, I>> {
+fn inbound_lane<T: Config<I>, I: 'static>(
+	lane_id: LaneId,
+) -> InboundLane<RuntimeInboundLaneStorage<T, I>> {
 	InboundLane::new(inbound_lane_storage::<T, I>(lane_id))
 }
 
 /// Creates new runtime inbound lane storage.
-fn inbound_lane_storage<T: Config<I>, I: 'static>(lane_id: LaneId) -> RuntimeInboundLaneStorage<T, I> {
+fn inbound_lane_storage<T: Config<I>, I: 'static>(
+	lane_id: LaneId,
+) -> RuntimeInboundLaneStorage<T, I> {
 	RuntimeInboundLaneStorage {
 		lane_id,
 		cached_data: RefCell::new(None),
@@ -1001,11 +1065,10 @@ fn inbound_lane_storage<T: Config<I>, I: 'static>(lane_id: LaneId) -> RuntimeInb
 }
 
 /// Creates new outbound lane object, backed by runtime storage.
-fn outbound_lane<T: Config<I>, I: 'static>(lane_id: LaneId) -> OutboundLane<RuntimeOutboundLaneStorage<T, I>> {
-	OutboundLane::new(RuntimeOutboundLaneStorage {
-		lane_id,
-		_phantom: Default::default(),
-	})
+fn outbound_lane<T: Config<I>, I: 'static>(
+	lane_id: LaneId,
+) -> OutboundLane<RuntimeOutboundLaneStorage<T, I>> {
+	OutboundLane::new(RuntimeOutboundLaneStorage { lane_id, _phantom: Default::default() })
 }
 
 /// Runtime inbound lane storage.
@@ -1041,7 +1104,7 @@ impl<T: Config<I>, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage<
 						we have no recursive borrows; qed",
 				) = Some(data.clone());
 				data
-			}
+			},
 		}
 	}
 
@@ -1077,27 +1140,19 @@ impl<T: Config<I>, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorag
 
 	#[cfg(test)]
 	fn message(&self, nonce: &MessageNonce) -> Option<MessageData<T::OutboundMessageFee>> {
-		OutboundMessages::<T, I>::get(MessageKey {
-			lane_id: self.lane_id,
-			nonce: *nonce,
-		})
+		OutboundMessages::<T, I>::get(MessageKey { lane_id: self.lane_id, nonce: *nonce })
 	}
 
-	fn save_message(&mut self, nonce: MessageNonce, mesage_data: MessageData<T::OutboundMessageFee>) {
-		OutboundMessages::<T, I>::insert(
-			MessageKey {
-				lane_id: self.lane_id,
-				nonce,
-			},
-			mesage_data,
-		);
+	fn save_message(
+		&mut self,
+		nonce: MessageNonce,
+		mesage_data: MessageData<T::OutboundMessageFee>,
+	) {
+		OutboundMessages::<T, I>::insert(MessageKey { lane_id: self.lane_id, nonce }, mesage_data);
 	}
 
 	fn remove_message(&mut self, nonce: &MessageNonce) {
-		OutboundMessages::<T, I>::remove(MessageKey {
-			lane_id: self.lane_id,
-			nonce: *nonce,
-		});
+		OutboundMessages::<T, I>::remove(MessageKey { lane_id: self.lane_id, nonce: *nonce });
 	}
 }
 
@@ -1130,9 +1185,10 @@ mod tests {
 	use super::*;
 	use crate::mock::{
 		message, message_payload, run_test, unrewarded_relayer, Event as TestEvent, Origin,
-		TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof,
-		TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2, TestOnMessageAccepted, TestRuntime, TokenConversionRate,
-		PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B,
+		TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter,
+		TestMessagesProof, TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2,
+		TestOnMessageAccepted, TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN,
+		REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B,
 	};
 	use bp_messages::{UnrewardedRelayer, UnrewardedRelayersState};
 	use frame_support::{assert_noop, assert_ok, weights::Weight};
@@ -1148,10 +1204,8 @@ mod tests {
 	fn send_regular_message() -> Weight {
 		get_ready_for_events();
 
-		let message_nonce = outbound_lane::<TestRuntime, ()>(TEST_LANE_ID)
-			.data()
-			.latest_generated_nonce
-			+ 1;
+		let message_nonce =
+			outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().latest_generated_nonce + 1;
 		let weight = Pallet::<TestRuntime>::send_message(
 			Origin::signed(1),
 			TEST_LANE_ID,
@@ -1210,7 +1264,10 @@ mod tests {
 			System::<TestRuntime>::events(),
 			vec![EventRecord {
 				phase: Phase::Initialization,
-				event: TestEvent::Messages(Event::MessagesDelivered(TEST_LANE_ID, DeliveredMessages::new(1, true),)),
+				event: TestEvent::Messages(Event::MessagesDelivered(
+					TEST_LANE_ID,
+					DeliveredMessages::new(1, true),
+				)),
 				topics: vec![],
 			}],
 		);
@@ -1373,7 +1430,8 @@ mod tests {
 
 			// 1:1 conversion that we use by default for testnets
 			let rialto_token = 1u64;
-			let rialto_token_in_millau_tokens = TokenConversionRate::get().saturating_mul_int(rialto_token);
+			let rialto_token_in_millau_tokens =
+				TokenConversionRate::get().saturating_mul_int(rialto_token);
 			assert_eq!(rialto_token_in_millau_tokens, 1);
 
 			// let's say conversion rate is 1:1.7
@@ -1431,7 +1489,9 @@ mod tests {
 						TEST_LANE_ID,
 						InboundLaneData {
 							last_confirmed_nonce: 1,
-							relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(),
+							relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
+								.into_iter()
+								.collect(),
 						},
 					))),
 					UnrewardedRelayersState {
@@ -1484,7 +1544,9 @@ mod tests {
 					TEST_LANE_ID,
 					InboundLaneData {
 						last_confirmed_nonce: 1,
-						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(),
+						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
+							.into_iter()
+							.collect(),
 					},
 				))),
 				UnrewardedRelayersState {
@@ -1524,7 +1586,12 @@ mod tests {
 		run_test(|| {
 			// messages with zero fee are rejected by lane verifier
 			assert_noop!(
-				Pallet::<TestRuntime>::send_message(Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 0),
+				Pallet::<TestRuntime>::send_message(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					REGULAR_PAYLOAD,
+					0
+				),
 				Error::<TestRuntime, ()>::MessageRejectedByLaneVerifier,
 			);
 		});
@@ -1587,11 +1654,10 @@ mod tests {
 			);
 
 			// message proof includes outbound lane state with latest confirmed message updated to 9
-			let mut message_proof: TestMessagesProof = Ok(vec![message(11, REGULAR_PAYLOAD)]).into();
-			message_proof.result.as_mut().unwrap()[0].1.lane_state = Some(OutboundLaneData {
-				latest_received_nonce: 9,
-				..Default::default()
-			});
+			let mut message_proof: TestMessagesProof =
+				Ok(vec![message(11, REGULAR_PAYLOAD)]).into();
+			message_proof.result.as_mut().unwrap()[0].1.lane_state =
+				Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() });
 
 			assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
 				Origin::signed(1),
@@ -1705,7 +1771,9 @@ mod tests {
 				TestMessagesDeliveryProof(Ok((
 					TEST_LANE_ID,
 					InboundLaneData {
-						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(),
+						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
+							.into_iter()
+							.collect(),
 						..Default::default()
 					}
 				))),
@@ -1715,16 +1783,11 @@ mod tests {
 					..Default::default()
 				},
 			));
-			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_A,
-				1000
-			));
-			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_B,
-				2000
-			));
+			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000));
+			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000));
 
-			// this reports delivery of both message 1 and message 2 => reward is paid only to TEST_RELAYER_B
+			// this reports delivery of both message 1 and message 2 => reward is paid only to
+			// TEST_RELAYER_B
 			assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
 				Origin::signed(1),
 				TestMessagesDeliveryProof(Ok((
@@ -1745,14 +1808,8 @@ mod tests {
 					..Default::default()
 				},
 			));
-			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_A,
-				1000
-			));
-			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_B,
-				2000
-			));
+			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000));
+			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000));
 		});
 	}
 
@@ -1839,10 +1896,7 @@ mod tests {
 				0, // weight may be zero in this case (all messages are improperly encoded)
 			),);
 
-			assert_eq!(
-				InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(),
-				1,
-			);
+			assert_eq!(InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(), 1,);
 		});
 	}
 
@@ -1855,27 +1909,22 @@ mod tests {
 			assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_proof(
 				Origin::signed(1),
 				TEST_RELAYER_A,
-				Ok(vec![
-					message(1, REGULAR_PAYLOAD),
-					invalid_message,
-					message(3, REGULAR_PAYLOAD),
-				])
+				Ok(
+					vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),]
+				)
 				.into(),
 				3,
 				REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight,
 			),);
 
-			assert_eq!(
-				InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(),
-				3,
-			);
+			assert_eq!(InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(), 3,);
 		});
 	}
 
 	#[test]
 	fn storage_message_key_computed_properly() {
-		// If this test fails, then something has been changed in module storage that is breaking all
-		// previously crafted messages proofs.
+		// If this test fails, then something has been changed in module storage that is breaking
+		// all previously crafted messages proofs.
 		let storage_key = storage_keys::message_key("BridgeMessages", &*b"test", 42).0;
 		assert_eq!(
 			storage_key,
@@ -1887,8 +1936,8 @@ mod tests {
 
 	#[test]
 	fn outbound_lane_data_key_computed_properly() {
-		// If this test fails, then something has been changed in module storage that is breaking all
-		// previously crafted outbound lane state proofs.
+		// If this test fails, then something has been changed in module storage that is breaking
+		// all previously crafted outbound lane state proofs.
 		let storage_key = storage_keys::outbound_lane_data_key("BridgeMessages", &*b"test").0;
 		assert_eq!(
 			storage_key,
@@ -1900,8 +1949,8 @@ mod tests {
 
 	#[test]
 	fn inbound_lane_data_key_computed_properly() {
-		// If this test fails, then something has been changed in module storage that is breaking all
-		// previously crafted inbound lane state proofs.
+		// If this test fails, then something has been changed in module storage that is breaking
+		// all previously crafted inbound lane state proofs.
 		let storage_key = storage_keys::inbound_lane_data_key("BridgeMessages", &*b"test").0;
 		assert_eq!(
 			storage_key,
@@ -1937,7 +1986,12 @@ mod tests {
 			receive_messages_delivery_proof();
 
 			assert_noop!(
-				Pallet::<TestRuntime, ()>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,),
+				Pallet::<TestRuntime, ()>::increase_message_fee(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					1,
+					100,
+				),
 				Error::<TestRuntime, ()>::MessageIsAlreadyDelivered,
 			);
 		});
@@ -1947,7 +2001,12 @@ mod tests {
 	fn increase_message_fee_fails_if_message_is_not_yet_sent() {
 		run_test(|| {
 			assert_noop!(
-				Pallet::<TestRuntime, ()>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,),
+				Pallet::<TestRuntime, ()>::increase_message_fee(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					1,
+					100,
+				),
 				Error::<TestRuntime, ()>::MessageIsNotYetSent,
 			);
 		});
@@ -1961,7 +2020,12 @@ mod tests {
 			TestMessageDeliveryAndDispatchPayment::reject_payments();
 
 			assert_noop!(
-				Pallet::<TestRuntime, ()>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,),
+				Pallet::<TestRuntime, ()>::increase_message_fee(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					1,
+					100,
+				),
 				Error::<TestRuntime, ()>::FailedToWithdrawMessageFee,
 			);
 		});
@@ -1995,11 +2059,12 @@ mod tests {
 				payload.dispatch_result.dispatch_fee_paid_during_dispatch = !is_prepaid;
 				let proof = Ok(vec![message(nonce, payload)]).into();
 				let messages_count = 1;
-				let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
-					&proof,
-					messages_count,
-					REGULAR_PAYLOAD.declared_weight,
-				);
+				let pre_dispatch_weight =
+					<TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
+						&proof,
+						messages_count,
+						REGULAR_PAYLOAD.declared_weight,
+					);
 				let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_proof(
 					Origin::signed(1),
 					TEST_RELAYER_A,
@@ -2023,7 +2088,8 @@ mod tests {
 			assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight);
 
 			// when dispatch is returning `unspent_weight > declared_weight`
-			let (pre, post) = submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false);
+			let (pre, post) =
+				submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false);
 			assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight);
 
 			// when there's no unspent weight
@@ -2122,16 +2188,20 @@ mod tests {
 			total_messages: 3,
 			..Default::default()
 		};
-		let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::receive_messages_delivery_proof_weight(
-			&proof,
-			&relayers_state,
-			crate::mock::DbWeight::get(),
-		);
-		let post_dispatch_weight =
-			Pallet::<TestRuntime>::receive_messages_delivery_proof(Origin::signed(1), proof, relayers_state)
-				.expect("confirmation has failed")
-				.actual_weight
-				.expect("receive_messages_delivery_proof always returns Some");
+		let pre_dispatch_weight =
+			<TestRuntime as Config>::WeightInfo::receive_messages_delivery_proof_weight(
+				&proof,
+				&relayers_state,
+				crate::mock::DbWeight::get(),
+			);
+		let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_delivery_proof(
+			Origin::signed(1),
+			proof,
+			relayers_state,
+		)
+		.expect("confirmation has failed")
+		.actual_weight
+		.expect("receive_messages_delivery_proof always returns Some");
 		(pre_dispatch_weight, post_dispatch_weight)
 	}
 
@@ -2146,7 +2216,9 @@ mod tests {
 	#[test]
 	fn receive_messages_delivery_proof_refunds_non_zero_weight() {
 		run_test(|| {
-			TestOnDeliveryConfirmed1::set_consumed_weight_per_message(crate::mock::DbWeight::get().writes(1));
+			TestOnDeliveryConfirmed1::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().writes(1),
+			);
 
 			let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery();
 			assert_eq!(
@@ -2160,13 +2232,16 @@ mod tests {
 	#[should_panic]
 	fn receive_messages_panics_in_debug_mode_if_callback_is_wrong() {
 		run_test(|| {
-			TestOnDeliveryConfirmed1::set_consumed_weight_per_message(crate::mock::DbWeight::get().reads_writes(2, 2));
+			TestOnDeliveryConfirmed1::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().reads_writes(2, 2),
+			);
 			confirm_3_messages_delivery()
 		});
 	}
 
 	#[test]
-	fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected() {
+	fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected(
+	) {
 		run_test(|| {
 			// send message first to be able to check that delivery_proof fails later
 			send_regular_message();
@@ -2182,10 +2257,7 @@ mod tests {
 					Origin::signed(1),
 					TestMessagesDeliveryProof(Ok((
 						TEST_LANE_ID,
-						InboundLaneData {
-							last_confirmed_nonce: 1,
-							relayers: Default::default(),
-						},
+						InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() },
 					))),
 					UnrewardedRelayersState::default(),
 				),
@@ -2215,15 +2287,17 @@ mod tests {
 				100,
 			));
 
-			let small_weight = Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1)
-				.expect("increase_message_fee has failed")
-				.actual_weight
-				.expect("increase_message_fee always returns Some");
+			let small_weight =
+				Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1)
+					.expect("increase_message_fee has failed")
+					.actual_weight
+					.expect("increase_message_fee always returns Some");
 
-			let large_weight = Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 2, 1)
-				.expect("increase_message_fee has failed")
-				.actual_weight
-				.expect("increase_message_fee always returns Some");
+			let large_weight =
+				Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 2, 1)
+					.expect("increase_message_fee has failed")
+					.actual_weight
+					.expect("increase_message_fee always returns Some");
 
 			assert!(
 				large_weight > small_weight,
@@ -2272,7 +2346,8 @@ mod tests {
 			let weight_when_max_messages_are_pruned = send_regular_message();
 			assert_eq!(
 				weight_when_max_messages_are_pruned,
-				when_zero_messages_are_pruned + crate::mock::DbWeight::get().writes(max_messages_to_prune),
+				when_zero_messages_are_pruned +
+					crate::mock::DbWeight::get().writes(max_messages_to_prune),
 			);
 		});
 	}
@@ -2289,7 +2364,9 @@ mod tests {
 	#[should_panic]
 	fn message_accepted_panics_in_debug_mode_if_callback_is_wrong() {
 		run_test(|| {
-			TestOnMessageAccepted::set_consumed_weight_per_message(crate::mock::DbWeight::get().reads_writes(2, 2));
+			TestOnMessageAccepted::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().reads_writes(2, 2),
+			);
 			send_regular_message();
 		});
 	}
@@ -2297,14 +2374,16 @@ mod tests {
 	#[test]
 	fn message_accepted_refunds_non_zero_weight() {
 		run_test(|| {
-			TestOnMessageAccepted::set_consumed_weight_per_message(crate::mock::DbWeight::get().writes(1));
+			TestOnMessageAccepted::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().writes(1),
+			);
 			let actual_callback_weight = send_regular_message();
 			let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::send_message_weight(
 				&REGULAR_PAYLOAD,
 				crate::mock::DbWeight::get(),
 			);
-			let prune_weight =
-				crate::mock::DbWeight::get().writes(<TestRuntime as Config>::MaxMessagesToPruneAtOnce::get());
+			let prune_weight = crate::mock::DbWeight::get()
+				.writes(<TestRuntime as Config>::MaxMessagesToPruneAtOnce::get());
 
 			assert_eq!(
 				pre_dispatch_weight.saturating_sub(actual_callback_weight),
diff --git a/bridges/modules/messages/src/mock.rs b/bridges/modules/messages/src/mock.rs
index 84496393d562ddf04ee0bc041960bbdebcb494c0..330cfef4cdea0208a300c3dcfaad9a0c7356720e 100644
--- a/bridges/modules/messages/src/mock.rs
+++ b/bridges/modules/messages/src/mock.rs
@@ -22,12 +22,14 @@ use crate::Config;
 use bitvec::prelude::*;
 use bp_messages::{
 	source_chain::{
-		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, OnMessageAccepted,
-		RelayersRewards, Sender, TargetHeaderChain,
+		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed,
+		OnMessageAccepted, RelayersRewards, Sender, TargetHeaderChain,
 	},
-	target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain},
-	DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData,
-	Parameter as MessagesParameter, UnrewardedRelayer,
+	target_chain::{
+		DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain,
+	},
+	DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce,
+	OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayer,
 };
 use bp_runtime::{messages::MessageDispatchResult, Size};
 use codec::{Decode, Encode};
@@ -53,8 +55,8 @@ pub struct TestPayload {
 	pub declared_weight: Weight,
 	/// Message dispatch result.
 	///
-	/// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, but for test
-	/// purposes we'll be making it larger than `declared_weight` sometimes.
+	/// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`,
+	/// but for test purposes we'll be making it larger than `declared_weight` sometimes.
 	pub dispatch_result: MessageDispatchResult,
 	/// Extra bytes that affect payload size.
 	pub extra: Vec<u8>,
@@ -153,7 +155,8 @@ pub enum TestMessagesParameter {
 impl MessagesParameter for TestMessagesParameter {
 	fn save(&self) {
 		match *self {
-			TestMessagesParameter::TokenConversionRate(conversion_rate) => TokenConversionRate::set(&conversion_rate),
+			TestMessagesParameter::TokenConversionRate(conversion_rate) =>
+				TokenConversionRate::set(&conversion_rate),
 		}
 	}
 }
@@ -235,14 +238,12 @@ impl From<Result<Vec<Message<TestMessageFee>>, ()>> for TestMessagesProof {
 	fn from(result: Result<Vec<Message<TestMessageFee>>, ()>) -> Self {
 		Self {
 			result: result.map(|messages| {
-				let mut messages_by_lane: BTreeMap<LaneId, ProvedLaneMessages<Message<TestMessageFee>>> =
-					BTreeMap::new();
+				let mut messages_by_lane: BTreeMap<
+					LaneId,
+					ProvedLaneMessages<Message<TestMessageFee>>,
+				> = BTreeMap::new();
 				for message in messages {
-					messages_by_lane
-						.entry(message.key.lane_id)
-						.or_default()
-						.messages
-						.push(message);
+					messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message);
 				}
 				messages_by_lane.into_iter().collect()
 			}),
@@ -318,7 +319,8 @@ impl TestMessageDeliveryAndDispatchPayment {
 
 	/// Returns true if given fee has been paid by given submitter.
 	pub fn is_fee_paid(submitter: AccountId, fee: TestMessageFee) -> bool {
-		frame_support::storage::unhashed::get(b":message-fee:") == Some((Sender::Signed(submitter), fee))
+		frame_support::storage::unhashed::get(b":message-fee:") ==
+			Some((Sender::Signed(submitter), fee))
 	}
 
 	/// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is
@@ -329,7 +331,9 @@ impl TestMessageDeliveryAndDispatchPayment {
 	}
 }
 
-impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee> for TestMessageDeliveryAndDispatchPayment {
+impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee>
+	for TestMessageDeliveryAndDispatchPayment
+{
 	type Error = &'static str;
 
 	fn pay_delivery_and_dispatch_fee(
@@ -338,7 +342,7 @@ impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee> for TestMessag
 		_relayer_fund_account: &AccountId,
 	) -> Result<(), Self::Error> {
 		if frame_support::storage::unhashed::get(b":reject-message-fee:") == Some(true) {
-			return Err(TEST_ERROR);
+			return Err(TEST_ERROR)
 		}
 
 		frame_support::storage::unhashed::put(b":message-fee:", &(submitter, fee));
@@ -382,7 +386,8 @@ impl OnMessageAccepted for TestOnMessageAccepted {
 	fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight {
 		let key = (b"TestOnMessageAccepted", lane, message).encode();
 		frame_support::storage::unhashed::put(&key, &true);
-		Self::get_consumed_weight_per_message().unwrap_or_else(|| DbWeight::get().reads_writes(1, 1))
+		Self::get_consumed_weight_per_message()
+			.unwrap_or_else(|| DbWeight::get().reads_writes(1, 1))
 	}
 }
 
@@ -451,10 +456,7 @@ impl SourceHeaderChain<TestMessageFee> for TestSourceHeaderChain {
 		proof: Self::MessagesProof,
 		_messages_count: u32,
 	) -> Result<ProvedMessages<Message<TestMessageFee>>, Self::Error> {
-		proof
-			.result
-			.map(|proof| proof.into_iter().collect())
-			.map_err(|_| TEST_ERROR)
+		proof.result.map(|proof| proof.into_iter().collect()).map_err(|_| TEST_ERROR)
 	}
 }
 
@@ -485,31 +487,17 @@ impl MessageDispatch<AccountId, TestMessageFee> for TestMessageDispatch {
 
 /// Return test lane message with given nonce and payload.
 pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message<TestMessageFee> {
-	Message {
-		key: MessageKey {
-			lane_id: TEST_LANE_ID,
-			nonce,
-		},
-		data: message_data(payload),
-	}
+	Message { key: MessageKey { lane_id: TEST_LANE_ID, nonce }, data: message_data(payload) }
 }
 
 /// Constructs message payload using given arguments and zero unspent weight.
 pub const fn message_payload(id: u64, declared_weight: Weight) -> TestPayload {
-	TestPayload {
-		id,
-		declared_weight,
-		dispatch_result: dispatch_result(0),
-		extra: Vec::new(),
-	}
+	TestPayload { id, declared_weight, dispatch_result: dispatch_result(0), extra: Vec::new() }
 }
 
 /// Return message data with valid fee for given payload.
 pub fn message_data(payload: TestPayload) -> MessageData<TestMessageFee> {
-	MessageData {
-		payload: payload.encode(),
-		fee: 1,
-	}
+	MessageData { payload: payload.encode(), fee: 1 }
 }
 
 /// Returns message dispatch result with given unspent weight.
@@ -543,14 +531,10 @@ pub fn unrewarded_relayer(
 
 /// Run pallet test.
 pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
-	let mut t = frame_system::GenesisConfig::default()
-		.build_storage::<TestRuntime>()
+	let mut t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
+	pallet_balances::GenesisConfig::<TestRuntime> { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] }
+		.assimilate_storage(&mut t)
 		.unwrap();
-	pallet_balances::GenesisConfig::<TestRuntime> {
-		balances: vec![(ENDOWED_ACCOUNT, 1_000_000)],
-	}
-	.assimilate_storage(&mut t)
-	.unwrap();
 	let mut ext = sp_io::TestExternalities::new(t);
 	ext.execute_with(test)
 }
diff --git a/bridges/modules/messages/src/outbound_lane.rs b/bridges/modules/messages/src/outbound_lane.rs
index 1962e0282a8e7646731f3d887ba0e17279a9ed06..c05437596db82af9f4ca4d3bacae457aa32c58dc 100644
--- a/bridges/modules/messages/src/outbound_lane.rs
+++ b/bridges/modules/messages/src/outbound_lane.rs
@@ -18,7 +18,8 @@
 
 use bitvec::prelude::*;
 use bp_messages::{
-	DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer,
+	DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData,
+	UnrewardedRelayer,
 };
 use frame_support::RuntimeDebug;
 use sp_std::collections::vec_deque::VecDeque;
@@ -57,11 +58,11 @@ pub enum ReceivalConfirmationResult {
 	/// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged
 	/// chain storage.
 	EmptyUnrewardedRelayerEntry,
-	/// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid bridged
-	/// chain storage.
+	/// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid
+	/// bridged chain storage.
 	NonConsecutiveUnrewardedRelayerEntries,
-	/// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May be
-	/// a result of invalid bridged chain storage.
+	/// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May
+	/// be a result of invalid bridged chain storage.
 	InvalidNumberOfDispatchResults,
 	/// The chain has more messages that need to be confirmed than there is in the proof.
 	TryingToConfirmMoreMessagesThanExpected(MessageNonce),
@@ -106,27 +107,30 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
 	) -> ReceivalConfirmationResult {
 		let mut data = self.storage.data();
 		if latest_delivered_nonce <= data.latest_received_nonce {
-			return ReceivalConfirmationResult::NoNewConfirmations;
+			return ReceivalConfirmationResult::NoNewConfirmations
 		}
 		if latest_delivered_nonce > data.latest_generated_nonce {
-			return ReceivalConfirmationResult::FailedToConfirmFutureMessages;
+			return ReceivalConfirmationResult::FailedToConfirmFutureMessages
 		}
 		if latest_delivered_nonce - data.latest_received_nonce > max_allowed_messages {
-			// that the relayer has declared correct number of messages that the proof contains (it is
-			// checked outside of the function). But it may happen (but only if this/bridged chain storage is
-			// corrupted, though) that the actual number of confirmed messages if larger than declared.
-			// This would mean that 'reward loop' will take more time than the weight formula accounts,
-			// so we can't allow that.
+			// that the relayer has declared correct number of messages that the proof contains (it
+			// is checked outside of the function). But it may happen (but only if this/bridged
+			// chain storage is corrupted, though) that the actual number of confirmed messages is
+			// larger than declared. This would mean that 'reward loop' will take more time than the
+			// weight formula accounts, so we can't allow that.
 			return ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(
 				latest_delivered_nonce - data.latest_received_nonce,
-			);
+			)
 		}
 
-		let dispatch_results =
-			match extract_dispatch_results(data.latest_received_nonce, latest_delivered_nonce, relayers) {
-				Ok(dispatch_results) => dispatch_results,
-				Err(extract_error) => return extract_error,
-			};
+		let dispatch_results = match extract_dispatch_results(
+			data.latest_received_nonce,
+			latest_delivered_nonce,
+			relayers,
+		) {
+			Ok(dispatch_results) => dispatch_results,
+			Err(extract_error) => return extract_error,
+		};
 
 		let prev_latest_received_nonce = data.latest_received_nonce;
 		data.latest_received_nonce = latest_delivered_nonce;
@@ -146,7 +150,9 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
 		let mut pruned_messages = 0;
 		let mut anything_changed = false;
 		let mut data = self.storage.data();
-		while pruned_messages < max_messages_to_prune && data.oldest_unpruned_nonce <= data.latest_received_nonce {
+		while pruned_messages < max_messages_to_prune &&
+			data.oldest_unpruned_nonce <= data.latest_received_nonce
+		{
 			self.storage.remove_message(&data.oldest_unpruned_nonce);
 
 			anything_changed = true;
@@ -171,9 +177,10 @@ fn extract_dispatch_results<RelayerId>(
 	latest_received_nonce: MessageNonce,
 	relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
 ) -> Result<DispatchResultsBitVec, ReceivalConfirmationResult> {
-	// the only caller of this functions checks that the prev_latest_received_nonce..=latest_received_nonce
-	// is valid, so we're ready to accept messages in this range
-	// => with_capacity call must succeed here or we'll be unable to receive confirmations at all
+	// the only caller of this function checks that the
+	// prev_latest_received_nonce..=latest_received_nonce is valid, so we're ready to accept
+	// messages in this range => with_capacity call must succeed here or we'll be unable to receive
+	// confirmations at all
 	let mut received_dispatch_result =
 		BitVec::with_capacity((latest_received_nonce - prev_latest_received_nonce + 1) as _);
 	let mut last_entry_end: Option<MessageNonce> = None;
@@ -181,43 +188,48 @@ fn extract_dispatch_results<RelayerId>(
 		// unrewarded relayer entry must have at least 1 unconfirmed message
 		// (guaranteed by the `InboundLane::receive_message()`)
 		if entry.messages.end < entry.messages.begin {
-			return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry);
+			return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry)
 		}
 		// every entry must confirm range of messages that follows previous entry range
 		// (guaranteed by the `InboundLane::receive_message()`)
 		if let Some(last_entry_end) = last_entry_end {
 			let expected_entry_begin = last_entry_end.checked_add(1);
 			if expected_entry_begin != Some(entry.messages.begin) {
-				return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries);
+				return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries)
 			}
 		}
 		last_entry_end = Some(entry.messages.end);
 		// entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()`
 		// (guaranteed by the `InboundLane::receive_message()`)
 		if entry.messages.end > latest_received_nonce {
-			// technically this will be detected in the next loop iteration as `InvalidNumberOfDispatchResults`
-			// but to guarantee safety of loop operations below this is detected now
-			return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages);
+			// technically this will be detected in the next loop iteration as
+			// `InvalidNumberOfDispatchResults` but to guarantee safety of loop operations below
+			// this is detected now
+			return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages)
 		}
 		// entry must have single dispatch result for every message
 		// (guaranteed by the `InboundLane::receive_message()`)
-		if entry.messages.dispatch_results.len() as MessageNonce != entry.messages.end - entry.messages.begin + 1 {
-			return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults);
+		if entry.messages.dispatch_results.len() as MessageNonce !=
+			entry.messages.end - entry.messages.begin + 1
+		{
+			return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults)
 		}
 
 		// now we know that the entry is valid
 		// => let's check if it brings new confirmations
-		let new_messages_begin = sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1);
+		let new_messages_begin =
+			sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1);
 		let new_messages_end = sp_std::cmp::min(entry.messages.end, latest_received_nonce);
 		let new_messages_range = new_messages_begin..=new_messages_end;
 		if new_messages_range.is_empty() {
-			continue;
+			continue
 		}
 
 		// now we know that entry brings new confirmations
 		// => let's extract dispatch results
 		received_dispatch_result.extend_from_bitslice(
-			&entry.messages.dispatch_results[(new_messages_begin - entry.messages.begin) as usize..],
+			&entry.messages.dispatch_results
+				[(new_messages_begin - entry.messages.begin) as usize..],
 		);
 	}
 
@@ -228,12 +240,17 @@ fn extract_dispatch_results<RelayerId>(
 mod tests {
 	use super::*;
 	use crate::{
-		mock::{message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID},
+		mock::{
+			message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD,
+			TEST_LANE_ID,
+		},
 		outbound_lane,
 	};
 	use sp_std::ops::RangeInclusive;
 
-	fn unrewarded_relayers(nonces: RangeInclusive<MessageNonce>) -> VecDeque<UnrewardedRelayer<TestRelayer>> {
+	fn unrewarded_relayers(
+		nonces: RangeInclusive<MessageNonce>,
+	) -> VecDeque<UnrewardedRelayer<TestRelayer>> {
 		vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)]
 			.into_iter()
 			.collect()
diff --git a/bridges/modules/messages/src/weights_ext.rs b/bridges/modules/messages/src/weights_ext.rs
index 297b03cfc17bb100c1bb75fc37746e6a0c8dd938..fef09c6cebe577a1e92a5a36c5dda9549a135a28 100644
--- a/bridges/modules/messages/src/weights_ext.rs
+++ b/bridges/modules/messages/src/weights_ext.rs
@@ -25,8 +25,8 @@ use frame_support::weights::{RuntimeDbWeight, Weight};
 /// Size of the message being delivered in benchmarks.
 pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128;
 
-/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of calls
-/// we're checking here would fit 1KB.
+/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of
+/// calls we're checking here would fit 1KB.
 const SIGNED_EXTENSIONS_SIZE: u32 = 1024;
 
 /// Number of extra bytes (excluding size of storage value itself) of storage proof, built at
@@ -54,12 +54,15 @@ pub fn ensure_weights_are_correct<W: WeightInfoExt>(
 
 	// verify that the hardcoded value covers `receive_messages_proof` weight
 	let actual_single_regular_message_delivery_tx_weight = W::receive_messages_proof_weight(
-		&PreComputedSize((EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize),
+		&PreComputedSize(
+			(EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize,
+		),
 		1,
 		0,
 	);
 	assert!(
-		actual_single_regular_message_delivery_tx_weight <= expected_default_message_delivery_tx_weight,
+		actual_single_regular_message_delivery_tx_weight <=
+			expected_default_message_delivery_tx_weight,
 		"Default message delivery transaction weight {} is larger than expected weight {}",
 		actual_single_regular_message_delivery_tx_weight,
 		expected_default_message_delivery_tx_weight,
@@ -91,7 +94,8 @@ pub fn ensure_weights_are_correct<W: WeightInfoExt>(
 		db_weight,
 	);
 	assert!(
-		actual_messages_delivery_confirmation_tx_weight <= expected_messages_delivery_confirmation_tx_weight,
+		actual_messages_delivery_confirmation_tx_weight <=
+			expected_messages_delivery_confirmation_tx_weight,
 		"Messages delivery confirmation transaction weight {} is larger than expected weight {}",
 		actual_messages_delivery_confirmation_tx_weight,
 		expected_messages_delivery_confirmation_tx_weight,
@@ -115,7 +119,8 @@ pub fn ensure_able_to_receive_message<W: WeightInfoExt>(
 	max_incoming_message_dispatch_weight: Weight,
 ) {
 	// verify that we're able to receive proof of maximal-size message
-	let max_delivery_transaction_size = max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE);
+	let max_delivery_transaction_size =
+		max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE);
 	assert!(
 		max_delivery_transaction_size <= max_extrinsic_size,
 		"Size of maximal message delivery transaction {} + {} is larger than maximal possible transaction size {}",
@@ -126,7 +131,9 @@ pub fn ensure_able_to_receive_message<W: WeightInfoExt>(
 
 	// verify that we're able to receive proof of maximal-size message with maximal dispatch weight
 	let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight(
-		&PreComputedSize((max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize),
+		&PreComputedSize(
+			(max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize,
+		),
 		1,
 		max_incoming_message_dispatch_weight,
 	);
@@ -158,7 +165,8 @@ pub fn ensure_able_to_receive_confirmation<W: WeightInfoExt>(
 		max_extrinsic_size,
 	);
 
-	// verify that we're able to reward maximal number of relayers that have delivered maximal number of messages
+	// verify that we're able to reward maximal number of relayers that have delivered maximal
+	// number of messages
 	let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight(
 		&PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize),
 		&UnrewardedRelayersState {
@@ -200,10 +208,15 @@ pub trait WeightInfoExt: WeightInfo {
 	}
 
 	/// Weight of message delivery extrinsic.
-	fn receive_messages_proof_weight(proof: &impl Size, messages_count: u32, dispatch_weight: Weight) -> Weight {
+	fn receive_messages_proof_weight(
+		proof: &impl Size,
+		messages_count: u32,
+		dispatch_weight: Weight,
+	) -> Weight {
 		// basic components of extrinsic weight
 		let transaction_overhead = Self::receive_messages_proof_overhead();
-		let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead();
+		let outbound_state_delivery_weight =
+			Self::receive_messages_proof_outbound_lane_state_overhead();
 		let messages_delivery_weight =
 			Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count));
 		let messages_dispatch_weight = dispatch_weight;
@@ -213,8 +226,9 @@ pub trait WeightInfoExt: WeightInfo {
 			.saturating_mul(messages_count.saturating_sub(1))
 			.saturating_add(Self::expected_extra_storage_proof_size());
 		let actual_proof_size = proof.size_hint();
-		let proof_size_overhead =
-			Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size));
+		let proof_size_overhead = Self::storage_proof_size_overhead(
+			actual_proof_size.saturating_sub(expected_proof_size),
+		);
 
 		transaction_overhead
 			.saturating_add(outbound_state_delivery_weight)
@@ -231,17 +245,21 @@ pub trait WeightInfoExt: WeightInfo {
 	) -> Weight {
 		// basic components of extrinsic weight
 		let transaction_overhead = Self::receive_messages_delivery_proof_overhead();
-		let messages_overhead = Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages);
-		let relayers_overhead =
-			Self::receive_messages_delivery_proof_relayers_overhead(relayers_state.unrewarded_relayer_entries);
+		let messages_overhead =
+			Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages);
+		let relayers_overhead = Self::receive_messages_delivery_proof_relayers_overhead(
+			relayers_state.unrewarded_relayer_entries,
+		);
 
 		// proof size overhead weight
 		let expected_proof_size = Self::expected_extra_storage_proof_size();
 		let actual_proof_size = proof.size_hint();
-		let proof_size_overhead =
-			Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size));
+		let proof_size_overhead = Self::storage_proof_size_overhead(
+			actual_proof_size.saturating_sub(expected_proof_size),
+		);
 
-		// and cost of calling `OnDeliveryConfirmed::on_messages_delivered()` for every confirmed message
+		// and cost of calling `OnDeliveryConfirmed::on_messages_delivered()` for every confirmed
+		// message
 		let callback_overhead = relayers_state
 			.total_messages
 			.saturating_mul(Self::single_message_callback_overhead(db_weight));
@@ -260,22 +278,26 @@ pub trait WeightInfoExt: WeightInfo {
 		Self::send_minimal_message_worst_case()
 	}
 
-	/// Returns weight that needs to be accounted when message of given size is sent (`send_message`).
+	/// Returns weight that needs to be accounted when message of given size is sent
+	/// (`send_message`).
 	fn send_message_size_overhead(message_size: u32) -> Weight {
 		let message_size_in_kb = (1024u64 + message_size as u64) / 1024;
-		let single_kb_weight = (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15;
+		let single_kb_weight =
+			(Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15;
 		message_size_in_kb * single_kb_weight
 	}
 
 	/// Returns weight overhead of message delivery transaction (`receive_messages_proof`).
 	fn receive_messages_proof_overhead() -> Weight {
-		let weight_of_two_messages_and_two_tx_overheads = Self::receive_single_message_proof().saturating_mul(2);
+		let weight_of_two_messages_and_two_tx_overheads =
+			Self::receive_single_message_proof().saturating_mul(2);
 		let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof();
-		weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
+		weight_of_two_messages_and_two_tx_overheads
+			.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
 	}
 
-	/// Returns weight that needs to be accounted when receiving given a number of messages with message
-	/// delivery transaction (`receive_messages_proof`).
+	/// Returns weight that needs to be accounted when receiving given a number of messages with
+	/// message delivery transaction (`receive_messages_proof`).
 	fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight {
 		let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof();
 		let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof();
@@ -284,27 +306,31 @@ pub trait WeightInfoExt: WeightInfo {
 			.saturating_mul(messages as Weight)
 	}
 
-	/// Returns weight that needs to be accounted when message delivery transaction (`receive_messages_proof`)
-	/// is carrying outbound lane state proof.
+	/// Returns weight that needs to be accounted when message delivery transaction
+	/// (`receive_messages_proof`) is carrying outbound lane state proof.
 	fn receive_messages_proof_outbound_lane_state_overhead() -> Weight {
-		let weight_of_single_message_and_lane_state = Self::receive_single_message_proof_with_outbound_lane_state();
+		let weight_of_single_message_and_lane_state =
+			Self::receive_single_message_proof_with_outbound_lane_state();
 		let weight_of_single_message = Self::receive_single_message_proof();
 		weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message)
 	}
 
-	/// Returns weight overhead of delivery confirmation transaction (`receive_messages_delivery_proof`).
+	/// Returns weight overhead of delivery confirmation transaction
+	/// (`receive_messages_delivery_proof`).
 	fn receive_messages_delivery_proof_overhead() -> Weight {
 		let weight_of_two_messages_and_two_tx_overheads =
 			Self::receive_delivery_proof_for_single_message().saturating_mul(2);
 		let weight_of_two_messages_and_single_tx_overhead =
 			Self::receive_delivery_proof_for_two_messages_by_single_relayer();
-		weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
+		weight_of_two_messages_and_two_tx_overheads
+			.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
 	}
 
 	/// Returns weight that needs to be accounted when receiving confirmations for given a number of
 	/// messages with delivery confirmation transaction (`receive_messages_delivery_proof`).
 	fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight {
-		let weight_of_two_messages = Self::receive_delivery_proof_for_two_messages_by_single_relayer();
+		let weight_of_two_messages =
+			Self::receive_delivery_proof_for_two_messages_by_single_relayer();
 		let weight_of_single_message = Self::receive_delivery_proof_for_single_message();
 		weight_of_two_messages
 			.saturating_sub(weight_of_single_message)
@@ -314,7 +340,8 @@ pub trait WeightInfoExt: WeightInfo {
 	/// Returns weight that needs to be accounted when receiving confirmations for given a number of
 	/// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`).
 	fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight {
-		let weight_of_two_messages_by_two_relayers = Self::receive_delivery_proof_for_two_messages_by_two_relayers();
+		let weight_of_two_messages_by_two_relayers =
+			Self::receive_delivery_proof_for_two_messages_by_two_relayers();
 		let weight_of_two_messages_by_single_relayer =
 			Self::receive_delivery_proof_for_two_messages_by_single_relayer();
 		weight_of_two_messages_by_two_relayers
@@ -322,8 +349,8 @@ pub trait WeightInfoExt: WeightInfo {
 			.saturating_mul(relayers as Weight)
 	}
 
-	/// Returns weight that needs to be accounted when storage proof of given size is received (either in
-	/// `receive_messages_proof` or `receive_messages_delivery_proof`).
+	/// Returns weight that needs to be accounted when storage proof of given size is received
+	/// (either in `receive_messages_proof` or `receive_messages_delivery_proof`).
 	///
 	/// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. proof
 	/// size depends on messages count or number of entries in the unrewarded relayers set. So this
@@ -332,23 +359,26 @@ pub trait WeightInfoExt: WeightInfo {
 	/// is less than that cost).
 	fn storage_proof_size_overhead(proof_size: u32) -> Weight {
 		let proof_size_in_bytes = proof_size as Weight;
-		let byte_weight =
-			(Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024);
+		let byte_weight = (Self::receive_single_message_proof_16_kb() -
+			Self::receive_single_message_proof_1_kb()) /
+			(15 * 1024);
 		proof_size_in_bytes * byte_weight
 	}
 
 	/// Returns weight of the pay-dispatch-fee operation for inbound messages.
 	///
-	/// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain option.
+	/// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain
+	/// option.
 	fn pay_inbound_dispatch_fee_overhead() -> Weight {
-		Self::receive_single_message_proof().saturating_sub(Self::receive_single_prepaid_message_proof())
+		Self::receive_single_message_proof()
+			.saturating_sub(Self::receive_single_prepaid_message_proof())
 	}
 
 	/// Returns pre-dispatch weight of single callback call.
 	///
 	/// When benchmarking the weight please take into consideration both the `OnMessageAccepted` and
-	/// `OnDeliveryConfirmed` callbacks. The method should return the greater of the two, because it's
-	/// used to estimate the weight in both contexts.
+	/// `OnDeliveryConfirmed` callbacks. The method should return the greater of the two, because
+	/// it's used to estimate the weight in both contexts.
 	fn single_message_callback_overhead(db_weight: RuntimeDbWeight) -> Weight {
 		db_weight.reads_writes(1, 1)
 	}
diff --git a/bridges/modules/shift-session-manager/src/lib.rs b/bridges/modules/shift-session-manager/src/lib.rs
index 011b50ad595dd171294a84879c7576742677384d..511cbaa1cbeb620b2d66220b04525e19df8bea62 100644
--- a/bridges/modules/shift-session-manager/src/lib.rs
+++ b/bridges/modules/shift-session-manager/src/lib.rs
@@ -54,7 +54,7 @@ impl<T: Config> pallet_session::SessionManager<T::ValidatorId> for Pallet<T> {
 	fn new_session(session_index: sp_staking::SessionIndex) -> Option<Vec<T::ValidatorId>> {
 		// we don't want to add even more fields to genesis config => just return None
 		if session_index == 0 || session_index == 1 {
-			return None;
+			return None
 		}
 
 		// the idea that on first call (i.e. when session 1 ends) we're reading current
@@ -101,13 +101,17 @@ mod tests {
 	#![allow(clippy::from_over_into)]
 
 	use super::*;
-	use frame_support::sp_io::TestExternalities;
-	use frame_support::sp_runtime::{
-		testing::{Header, UintAuthorityId},
-		traits::{BlakeTwo256, ConvertInto, IdentityLookup},
-		Perbill, RuntimeAppPublic,
+	use frame_support::{
+		parameter_types,
+		sp_io::TestExternalities,
+		sp_runtime::{
+			testing::{Header, UintAuthorityId},
+			traits::{BlakeTwo256, ConvertInto, IdentityLookup},
+			Perbill, RuntimeAppPublic,
+		},
+		weights::Weight,
+		BasicExternalities,
 	};
-	use frame_support::{parameter_types, weights::Weight, BasicExternalities};
 	use sp_core::H256;
 
 	type AccountId = u64;
@@ -183,17 +187,21 @@ mod tests {
 	impl pallet_session::SessionHandler<AccountId> for TestSessionHandler {
 		const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];
 
-		fn on_genesis_session<Ks: sp_runtime::traits::OpaqueKeys>(_validators: &[(AccountId, Ks)]) {}
+		fn on_genesis_session<Ks: sp_runtime::traits::OpaqueKeys>(_validators: &[(AccountId, Ks)]) {
+		}
 
-		fn on_new_session<Ks: sp_runtime::traits::OpaqueKeys>(_: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)]) {}
+		fn on_new_session<Ks: sp_runtime::traits::OpaqueKeys>(
+			_: bool,
+			_: &[(AccountId, Ks)],
+			_: &[(AccountId, Ks)],
+		) {
+		}
 
 		fn on_disabled(_: usize) {}
 	}
 
 	fn new_test_ext() -> TestExternalities {
-		let mut t = frame_system::GenesisConfig::default()
-			.build_storage::<TestRuntime>()
-			.unwrap();
+		let mut t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 
 		let keys = vec![
 			(1, 1, UintAuthorityId(1)),
diff --git a/bridges/modules/token-swap/src/lib.rs b/bridges/modules/token-swap/src/lib.rs
index 5528a25296d5005bac4a6f04502ad10143d06752..9469df3aaceeb76735c46a69d17fb24d36fec18f 100644
--- a/bridges/modules/token-swap/src/lib.rs
+++ b/bridges/modules/token-swap/src/lib.rs
@@ -22,29 +22,30 @@
 //!
 //! There are four accounts participating in the swap:
 //!
-//! 1) account of This chain that has signed the `create_swap` transaction and has balance on This chain.
-//!    We'll be referring to this account as `source_account_at_this_chain`;
-//! 2) account of the Bridged chain that is sending the `claim_swap` message from the Bridged to This chain.
-//!    This account has balance on Bridged chain and is willing to swap these tokens to This chain tokens of
-//!    the `source_account_at_this_chain`. We'll be referring to this account as `target_account_at_bridged_chain`;
-//! 3) account of the Bridged chain that is indirectly controlled by the `source_account_at_this_chain`. We'll be
-//!    referring this account as `source_account_at_bridged_chain`;
-//! 4) account of This chain that is indirectly controlled by the `target_account_at_bridged_chain`. We'll be
-//!    referring this account as `target_account_at_this_chain`.
+//! 1) account of This chain that has signed the `create_swap` transaction and has balance on This
+//! chain.    We'll be referring to this account as `source_account_at_this_chain`;
+//! 2) account of the Bridged chain that is sending the `claim_swap` message from the Bridged to
+//! This chain.    This account has balance on Bridged chain and is willing to swap these tokens to
+//! This chain tokens of    the `source_account_at_this_chain`. We'll be referring to this account
+//! as `target_account_at_bridged_chain`; 3) account of the Bridged chain that is indirectly
+//! controlled by the `source_account_at_this_chain`. We'll be    referring this account as
+//! `source_account_at_bridged_chain`; 4) account of This chain that is indirectly controlled by the
+//! `target_account_at_bridged_chain`. We'll be    referring this account as
+//! `target_account_at_this_chain`.
 //!
-//! So the tokens swap is an intention of `source_account_at_this_chain` to swap his `source_balance_at_this_chain`
-//! tokens to the `target_balance_at_bridged_chain` tokens owned by `target_account_at_bridged_chain`. The swap
-//! process goes as follows:
+//! So the tokens swap is an intention of `source_account_at_this_chain` to swap his
+//! `source_balance_at_this_chain` tokens to the `target_balance_at_bridged_chain` tokens owned by
+//! `target_account_at_bridged_chain`. The swap process goes as follows:
 //!
-//! 1) the `source_account_at_this_chain` account submits the `create_swap` transaction on This chain;
-//! 2) the tokens transfer message that would transfer `target_balance_at_bridged_chain` tokens from the
-//!    `target_account_at_bridged_chain` to the `source_account_at_bridged_chain`, is sent over the bridge;
-//! 3) when transfer message is delivered and dispatched, the pallet receives notification;
-//! 4) if message has been successfully dispatched, the `target_account_at_bridged_chain` sends the message
-//!    that would transfer `source_balance_at_this_chain` tokens to his `target_account_at_this_chain`
-//!    account;
-//! 5) if message dispatch has failed, the `source_account_at_this_chain` may submit the `cancel_swap`
-//!    transaction and return his `source_balance_at_this_chain` back to his account.
+//! 1) the `source_account_at_this_chain` account submits the `create_swap` transaction on This
+//! chain; 2) the tokens transfer message that would transfer `target_balance_at_bridged_chain`
+//! tokens from the    `target_account_at_bridged_chain` to the `source_account_at_bridged_chain`,
+//! is sent over the bridge; 3) when transfer message is delivered and dispatched, the pallet
+//! receives notification; 4) if message has been successfully dispatched, the
+//! `target_account_at_bridged_chain` sends the message    that would transfer
+//! `source_balance_at_this_chain` tokens to his `target_account_at_this_chain`    account;
+//! 5) if message dispatch has failed, the `source_account_at_this_chain` may submit the
+//! `cancel_swap`    transaction and return his `source_balance_at_this_chain` back to his account.
 //!
 //! While swap is pending, the `source_balance_at_this_chain` tokens are owned by the special
 //! temporary `swap_account_at_this_chain` account. It is destroyed upon swap completion.
@@ -118,8 +119,9 @@ pub mod pallet {
 	}
 
 	/// Tokens balance at This chain.
-	pub type ThisChainBalance<T, I> =
-		<<T as Config<I>>::ThisCurrency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
+	pub type ThisChainBalance<T, I> = <<T as Config<I>>::ThisCurrency as Currency<
+		<T as frame_system::Config>::AccountId,
+	>>::Balance;
 
 	/// Type of the Bridged chain.
 	pub type BridgedChainOf<T, I> = <T as Config<I>>::BridgedChain;
@@ -164,33 +166,40 @@ pub mod pallet {
 	{
 		/// Start token swap procedure.
 		///
-		/// The dispatch origin for this call must be exactly the `swap.source_account_at_this_chain` account.
+		/// The dispatch origin for this call must be exactly the
+		/// `swap.source_account_at_this_chain` account.
 		///
 		/// Method arguments are:
 		///
 		/// - `swap` - token swap intention;
-		/// - `target_public_at_bridged_chain` - the public key of the `swap.target_account_at_bridged_chain`
-		///   account used to verify `bridged_currency_transfer_signature`;
-		/// - `bridged_currency_transfer` - the SCALE-encoded tokens transfer call at the Bridged chain;
-		/// - `bridged_currency_transfer_signature` - the signature of the `swap.target_account_at_bridged_chain`
-		///   for the message returned by the `pallet_bridge_dispatch::account_ownership_digest()` function call.
+		/// - `target_public_at_bridged_chain` - the public key of the
+		///   `swap.target_account_at_bridged_chain` account used to verify
+		///   `bridged_currency_transfer_signature`;
+		/// - `bridged_currency_transfer` - the SCALE-encoded tokens transfer call at the Bridged
+		///   chain;
+		/// - `bridged_currency_transfer_signature` - the signature of the
+		///   `swap.target_account_at_bridged_chain` for the message returned by the
+		///   `pallet_bridge_dispatch::account_ownership_digest()` function call.
 		///
-		/// The `source_account_at_this_chain` MUST have enough balance to cover both token swap and message
-		/// transfer. Message fee may be estimated using corresponding `OutboundLaneApi` of This runtime.
+		/// The `source_account_at_this_chain` MUST have enough balance to cover both token swap and
+		/// message transfer. Message fee may be estimated using corresponding `OutboundLaneApi` of
+		/// This runtime.
 		///
 		/// **WARNING**: the submitter of this transaction is responsible for verifying:
 		///
-		/// 1) that the `bridged_currency_transfer` represents a valid token transfer call that transfers
-		///    `swap.target_balance_at_bridged_chain` to his `source_account_at_bridged_chain` account;
-		/// 2) that either the `source_account_at_bridged_chain` already exists, or the
-		///    `swap.target_balance_at_bridged_chain` is above existential deposit of the Bridged chain;
-		/// 3) the `target_public_at_bridged_chain` matches the `swap.target_account_at_bridged_chain`;
-		/// 4) the `bridged_currency_transfer_signature` is valid and generated by the owner of the
-		///    `target_public_at_bridged_chain` account (read more about [`CallOrigin::TargetAccount`]).
+		/// 1) that the `bridged_currency_transfer` represents a valid token transfer call that
+		/// transfers    `swap.target_balance_at_bridged_chain` to his
+		/// `source_account_at_bridged_chain` account; 2) that either the
+		/// `source_account_at_bridged_chain` already exists, or the
+		///    `swap.target_balance_at_bridged_chain` is above existential deposit of the Bridged
+		/// chain; 3) the `target_public_at_bridged_chain` matches the
+		/// `swap.target_account_at_bridged_chain`; 4) the `bridged_currency_transfer_signature` is
+		/// valid and generated by the owner of the    `target_public_at_bridged_chain` account
+		/// (read more about [`CallOrigin::TargetAccount`]).
 		///
-		/// Violating rule#1 will lead to losing your `source_balance_at_this_chain` tokens. Violating other
-		/// rules will lead to losing message fees for this and other transactions + losing fees for message
-		/// transfer.
+		/// Violating rule#1 will lead to losing your `source_balance_at_this_chain` tokens.
+		/// Violating other rules will lead to losing message fees for this and other transactions +
+		/// losing fees for message transfer.
 		#[pallet::weight(0)]
 		#[allow(clippy::too_many_arguments)]
 		pub fn create_swap(
@@ -203,7 +212,8 @@ pub mod pallet {
 			bridged_currency_transfer_weight: Weight,
 			bridged_currency_transfer_signature: BridgedAccountSignatureOf<T, I>,
 		) -> DispatchResultWithPostInfo {
-			// ensure that the `origin` is the same account that is mentioned in the `swap` intention
+			// ensure that the `origin` is the same account that is mentioned in the `swap`
+			// intention
 			let origin_account = ensure_signed(origin)?;
 			ensure!(
 				origin_account == swap.source_account_at_this_chain,
@@ -221,8 +231,8 @@ pub mod pallet {
 				Error::<T, I>::TooLowBalanceOnThisChain,
 			);
 
-			// if the swap is replay-protected, then we need to ensure that we have not yet passed the
-			// specified block yet
+			// if the swap is replay-protected, then we need to ensure that we have not yet passed
+			// the specified block yet
 			match swap.swap_type {
 				TokenSwapType::TemporaryTargetAccountAtBridgedChain => (),
 				TokenSwapType::LockClaimUntilBlock(block_number, _) => ensure!(
@@ -237,7 +247,8 @@ pub mod pallet {
 				let transfer_result = T::ThisCurrency::transfer(
 					&swap.source_account_at_this_chain,
 					&swap_account,
-					// saturating_add is ok, or we have the chain where single holder owns all tokens
+					// saturating_add is ok, or we have the chain where single holder owns all
+					// tokens
 					swap.source_balance_at_this_chain
 						.saturating_add(swap_delivery_and_dispatch_fee),
 					// if we'll allow account to die, then he'll be unable to `cancel_claim`
@@ -254,8 +265,8 @@ pub mod pallet {
 					);
 
 					return sp_runtime::TransactionOutcome::Rollback(Err(
-						Error::<T, I>::FailedToTransferToSwapAccount.into()
-					));
+						Error::<T, I>::FailedToTransferToSwapAccount.into(),
+					))
 				}
 
 				// the transfer message is sent over the bridge. The message is supposed to be a
@@ -289,20 +300,21 @@ pub mod pallet {
 
 						return sp_runtime::TransactionOutcome::Rollback(Err(
 							Error::<T, I>::FailedToSendTransferMessage.into(),
-						));
-					}
+						))
+					},
 				};
 
 				// remember that we have started the swap
 				let swap_hash = swap.using_encoded(blake2_256).into();
-				let insert_swap_result = PendingSwaps::<T, I>::try_mutate(swap_hash, |maybe_state| {
-					if maybe_state.is_some() {
-						return Err(());
-					}
-
-					*maybe_state = Some(TokenSwapState::Started);
-					Ok(())
-				});
+				let insert_swap_result =
+					PendingSwaps::<T, I>::try_mutate(swap_hash, |maybe_state| {
+						if maybe_state.is_some() {
+							return Err(())
+						}
+
+						*maybe_state = Some(TokenSwapState::Started);
+						Ok(())
+					});
 				if insert_swap_result.is_err() {
 					log::error!(
 						target: "runtime::bridge-token-swap",
@@ -310,7 +322,9 @@ pub mod pallet {
 						swap,
 					);
 
-					return sp_runtime::TransactionOutcome::Rollback(Err(Error::<T, I>::SwapAlreadyStarted.into()));
+					return sp_runtime::TransactionOutcome::Rollback(Err(
+						Error::<T, I>::SwapAlreadyStarted.into(),
+					))
 				}
 
 				log::trace!(
@@ -330,21 +344,23 @@ pub mod pallet {
 			})
 		}
 
-		/// Claim previously reserved `source_balance_at_this_chain` by `target_account_at_this_chain`.
+		/// Claim previously reserved `source_balance_at_this_chain` by
+		/// `target_account_at_this_chain`.
 		///
-		/// **WARNING**: the correct way to call this function is to call it over the messages bridge with
-		/// dispatch origin set to `pallet_bridge_dispatch::CallOrigin::SourceAccount(target_account_at_bridged_chain)`.
+		/// **WARNING**: the correct way to call this function is to call it over the messages
+		/// bridge with dispatch origin set to
+		/// `pallet_bridge_dispatch::CallOrigin::SourceAccount(target_account_at_bridged_chain)`.
 		///
 		/// This should be called only when successful transfer confirmation has been received.
 		#[pallet::weight(0)]
-		pub fn claim_swap(origin: OriginFor<T>, swap: TokenSwapOf<T, I>) -> DispatchResultWithPostInfo {
+		pub fn claim_swap(
+			origin: OriginFor<T>,
+			swap: TokenSwapOf<T, I>,
+		) -> DispatchResultWithPostInfo {
 			// ensure that the `origin` is controlled by the `swap.target_account_at_bridged_chain`
 			let origin_account = ensure_signed(origin)?;
 			let target_account_at_this_chain = target_account_at_this_chain::<T, I>(&swap);
-			ensure!(
-				origin_account == target_account_at_this_chain,
-				Error::<T, I>::InvalidClaimant,
-			);
+			ensure!(origin_account == target_account_at_this_chain, Error::<T, I>::InvalidClaimant,);
 
 			// ensure that the swap is confirmed
 			let swap_hash = swap.using_encoded(blake2_256).into();
@@ -354,13 +370,12 @@ pub mod pallet {
 				Some(TokenSwapState::Confirmed) => {
 					let is_claim_allowed = match swap.swap_type {
 						TokenSwapType::TemporaryTargetAccountAtBridgedChain => true,
-						TokenSwapType::LockClaimUntilBlock(block_number, _) => {
-							block_number < frame_system::Pallet::<T>::block_number()
-						}
+						TokenSwapType::LockClaimUntilBlock(block_number, _) =>
+							block_number < frame_system::Pallet::<T>::block_number(),
 					};
 
 					ensure!(is_claim_allowed, Error::<T, I>::SwapIsTemporaryLocked);
-				}
+				},
 				Some(TokenSwapState::Failed) => fail!(Error::<T, I>::SwapIsFailed),
 				None => fail!(Error::<T, I>::SwapIsInactive),
 			}
@@ -368,13 +383,18 @@ pub mod pallet {
 			complete_claim::<T, I>(swap, swap_hash, origin_account, Event::SwapClaimed(swap_hash))
 		}
 
-		/// Return previously reserved `source_balance_at_this_chain` back to the `source_account_at_this_chain`.
+		/// Return previously reserved `source_balance_at_this_chain` back to the
+		/// `source_account_at_this_chain`.
 		///
-		/// This should be called only when transfer has failed at Bridged chain and we have received
-		/// notification about that.
+		/// This should be called only when transfer has failed at Bridged chain and we have
+		/// received notification about that.
 		#[pallet::weight(0)]
-		pub fn cancel_swap(origin: OriginFor<T>, swap: TokenSwapOf<T, I>) -> DispatchResultWithPostInfo {
-			// ensure that the `origin` is the same account that is mentioned in the `swap` intention
+		pub fn cancel_swap(
+			origin: OriginFor<T>,
+			swap: TokenSwapOf<T, I>,
+		) -> DispatchResultWithPostInfo {
+			// ensure that the `origin` is the same account that is mentioned in the `swap`
+			// intention
 			let origin_account = ensure_signed(origin)?;
 			ensure!(
 				origin_account == swap.source_account_at_this_chain,
@@ -388,9 +408,10 @@ pub mod pallet {
 				Some(TokenSwapState::Started) => fail!(Error::<T, I>::SwapIsPending),
 				Some(TokenSwapState::Confirmed) => fail!(Error::<T, I>::SwapIsConfirmed),
 				Some(TokenSwapState::Failed) => {
-					// we allow canceling swap even before lock period is over - the `source_account_at_this_chain`
-					// has already paid for nothing and it is up to him to decide whether he want to try again
-				}
+					// we allow canceling swap even before lock period is over - the
+					// `source_account_at_this_chain` has already paid for nothing and it is up to
+					// him to decide whether he want to try again
+				},
 				None => fail!(Error::<T, I>::SwapIsInactive),
 			}
 
@@ -413,13 +434,15 @@ pub mod pallet {
 
 	#[pallet::error]
 	pub enum Error<T, I = ()> {
-		/// The account that has submitted the `start_claim` doesn't match the `TokenSwap::source_account_at_this_chain`.
+		/// The account that has submitted the `start_claim` doesn't match the
+		/// `TokenSwap::source_account_at_this_chain`.
 		MismatchedSwapSourceOrigin,
 		/// The swap balance in This chain tokens is below existential deposit and can't be made.
 		TooLowBalanceOnThisChain,
 		/// Transfer from This chain account to temporary Swap account has failed.
 		FailedToTransferToSwapAccount,
-		/// Transfer from the temporary Swap account to the derived account of Bridged account has failed.
+		/// Transfer from the temporary Swap account to the derived account of Bridged account has
+		/// failed.
 		FailedToTransferFromSwapAccount,
 		/// The message to transfer tokens on Target chain can't be sent.
 		FailedToSendTransferMessage,
@@ -431,17 +454,18 @@ pub mod pallet {
 		SwapIsFailed,
 		/// Claiming swap is not allowed.
 		///
-		/// Now the only possible case when you may get this error, is when you're trying to claim swap with
-		/// `TokenSwapType::LockClaimUntilBlock` before lock period is over.
+		/// Now the only possible case when you may get this error, is when you're trying to claim
+		/// swap with `TokenSwapType::LockClaimUntilBlock` before lock period is over.
 		SwapIsTemporaryLocked,
 		/// Swap period is finished and you can not restart it.
 		///
-		/// Now the only possible case when you may get this error, is when you're trying to start swap with
-		/// `TokenSwapType::LockClaimUntilBlock` after lock period is over.
+		/// Now the only possible case when you may get this error, is when you're trying to start
+		/// swap with `TokenSwapType::LockClaimUntilBlock` after lock period is over.
 		SwapPeriodIsFinished,
 		/// Someone is trying to cancel swap that has been confirmed.
 		SwapIsConfirmed,
-		/// Someone is trying to claim/cancel swap that is either not started or already claimed/canceled.
+		/// Someone is trying to claim/cancel swap that is either not started or already
+		/// claimed/canceled.
 		SwapIsInactive,
 		/// The swap claimant is invalid.
 		InvalidClaimant,
@@ -449,17 +473,19 @@ pub mod pallet {
 
 	/// Pending token swaps states.
 	#[pallet::storage]
-	pub type PendingSwaps<T: Config<I>, I: 'static = ()> = StorageMap<_, Identity, H256, TokenSwapState>;
+	pub type PendingSwaps<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, H256, TokenSwapState>;
 
 	/// Pending transfer messages.
 	#[pallet::storage]
-	pub type PendingMessages<T: Config<I>, I: 'static = ()> = StorageMap<_, Identity, MessageNonce, H256>;
+	pub type PendingMessages<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, MessageNonce, H256>;
 
 	impl<T: Config<I>, I: 'static> OnDeliveryConfirmed for Pallet<T, I> {
 		fn on_messages_delivered(lane: &LaneId, delivered_messages: &DeliveredMessages) -> Weight {
 			// we're only interested in our lane messages
 			if *lane != T::OutboundMessageLaneId::get() {
-				return 0;
+				return 0
 			}
 
 			// so now we're dealing with our lane messages. Ideally we'll have dedicated lane
@@ -472,11 +498,12 @@ pub mod pallet {
 				if let Some(swap_hash) = PendingMessages::<T, I>::take(message_nonce) {
 					writes += 1;
 
-					let token_swap_state = if delivered_messages.message_dispatch_result(message_nonce) {
-						TokenSwapState::Confirmed
-					} else {
-						TokenSwapState::Failed
-					};
+					let token_swap_state =
+						if delivered_messages.message_dispatch_result(message_nonce) {
+							TokenSwapState::Confirmed
+						} else {
+							TokenSwapState::Failed
+						};
 
 					log::trace!(
 						target: "runtime::bridge-token-swap",
@@ -494,12 +521,16 @@ pub mod pallet {
 	}
 
 	/// Returns temporary account id used to lock funds during swap on This chain.
-	pub(crate) fn swap_account_id<T: Config<I>, I: 'static>(swap: &TokenSwapOf<T, I>) -> T::AccountId {
+	pub(crate) fn swap_account_id<T: Config<I>, I: 'static>(
+		swap: &TokenSwapOf<T, I>,
+	) -> T::AccountId {
 		T::FromSwapToThisAccountIdConverter::convert(swap.using_encoded(blake2_256).into())
 	}
 
 	/// Expected target account representation on This chain (aka `target_account_at_this_chain`).
-	pub(crate) fn target_account_at_this_chain<T: Config<I>, I: 'static>(swap: &TokenSwapOf<T, I>) -> T::AccountId {
+	pub(crate) fn target_account_at_this_chain<T: Config<I>, I: 'static>(
+		swap: &TokenSwapOf<T, I>,
+	) -> T::AccountId {
 		T::FromBridgedToThisAccountIdConverter::convert(bp_runtime::derive_account_id(
 			T::BridgedChainId::get(),
 			bp_runtime::SourceAccount::Account(swap.target_account_at_bridged_chain.clone()),
@@ -533,8 +564,8 @@ pub mod pallet {
 				);
 
 				return sp_runtime::TransactionOutcome::Rollback(Err(
-					Error::<T, I>::FailedToTransferFromSwapAccount.into()
-				));
+					Error::<T, I>::FailedToTransferFromSwapAccount.into(),
+				))
 			}
 
 			log::trace!(
@@ -786,20 +817,21 @@ mod tests {
 			));
 
 			let swap_hash = test_swap_hash();
-			assert_eq!(
-				PendingSwaps::<TestRuntime>::get(swap_hash),
-				Some(TokenSwapState::Started)
-			);
+			assert_eq!(PendingSwaps::<TestRuntime>::get(swap_hash), Some(TokenSwapState::Started));
 			assert_eq!(PendingMessages::<TestRuntime>::get(MESSAGE_NONCE), Some(swap_hash));
 			assert_eq!(
-				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<TestRuntime, ()>(&test_swap())),
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<
+					TestRuntime,
+					(),
+				>(&test_swap())),
 				test_swap().source_balance_at_this_chain + SWAP_DELIVERY_AND_DISPATCH_FEE,
 			);
 			assert!(
-				frame_system::Pallet::<TestRuntime>::events()
-					.iter()
-					.any(|e| e.event
-						== crate::mock::Event::TokenSwap(crate::Event::SwapStarted(swap_hash, MESSAGE_NONCE,))),
+				frame_system::Pallet::<TestRuntime>::events().iter().any(|e| e.event ==
+					crate::mock::Event::TokenSwap(crate::Event::SwapStarted(
+						swap_hash,
+						MESSAGE_NONCE,
+					))),
 				"Missing SwapStarted event: {:?}",
 				frame_system::Pallet::<TestRuntime>::events(),
 			);
@@ -811,7 +843,9 @@ mod tests {
 		run_test(|| {
 			assert_noop!(
 				Pallet::<TestRuntime>::claim_swap(
-					Origin::signed(1 + target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+					Origin::signed(
+						1 + target_account_at_this_chain::<TestRuntime, ()>(&test_swap())
+					),
 					test_swap(),
 				),
 				Error::<TestRuntime, ()>::InvalidClaimant
@@ -913,19 +947,21 @@ mod tests {
 			let swap_hash = test_swap_hash();
 			assert_eq!(PendingSwaps::<TestRuntime>::get(swap_hash), None);
 			assert_eq!(
-				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<TestRuntime, ()>(&test_swap())),
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<
+					TestRuntime,
+					(),
+				>(&test_swap())),
 				0,
 			);
 			assert_eq!(
-				pallet_balances::Pallet::<TestRuntime>::free_balance(&target_account_at_this_chain::<TestRuntime, ()>(
-					&test_swap()
-				),),
+				pallet_balances::Pallet::<TestRuntime>::free_balance(
+					&target_account_at_this_chain::<TestRuntime, ()>(&test_swap()),
+				),
 				test_swap().source_balance_at_this_chain,
 			);
 			assert!(
-				frame_system::Pallet::<TestRuntime>::events()
-					.iter()
-					.any(|e| e.event == crate::mock::Event::TokenSwap(crate::Event::SwapClaimed(swap_hash,))),
+				frame_system::Pallet::<TestRuntime>::events().iter().any(|e| e.event ==
+					crate::mock::Event::TokenSwap(crate::Event::SwapClaimed(swap_hash,))),
 				"Missing SwapClaimed event: {:?}",
 				frame_system::Pallet::<TestRuntime>::events(),
 			);
@@ -939,7 +975,10 @@ mod tests {
 			receive_test_swap_confirmation(false);
 
 			assert_noop!(
-				Pallet::<TestRuntime>::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT + 1), test_swap()),
+				Pallet::<TestRuntime>::cancel_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT + 1),
+					test_swap()
+				),
 				Error::<TestRuntime, ()>::MismatchedSwapSourceOrigin
 			);
 		});
@@ -1014,7 +1053,10 @@ mod tests {
 			let swap_hash = test_swap_hash();
 			assert_eq!(PendingSwaps::<TestRuntime>::get(swap_hash), None);
 			assert_eq!(
-				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<TestRuntime, ()>(&test_swap())),
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<
+					TestRuntime,
+					(),
+				>(&test_swap())),
 				0,
 			);
 			assert_eq!(
@@ -1022,9 +1064,8 @@ mod tests {
 				THIS_CHAIN_ACCOUNT_BALANCE - SWAP_DELIVERY_AND_DISPATCH_FEE,
 			);
 			assert!(
-				frame_system::Pallet::<TestRuntime>::events()
-					.iter()
-					.any(|e| e.event == crate::mock::Event::TokenSwap(crate::Event::SwapCanceled(swap_hash,))),
+				frame_system::Pallet::<TestRuntime>::events().iter().any(|e| e.event ==
+					crate::mock::Event::TokenSwap(crate::Event::SwapCanceled(swap_hash,))),
 				"Missing SwapCanceled event: {:?}",
 				frame_system::Pallet::<TestRuntime>::events(),
 			);
@@ -1047,7 +1088,10 @@ mod tests {
 			// when unrelated messages are delivered
 			let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 2, true);
 			messages.note_dispatched_message(false);
-			Pallet::<TestRuntime, ()>::on_messages_delivered(&OutboundMessageLaneId::get(), &messages);
+			Pallet::<TestRuntime, ()>::on_messages_delivered(
+				&OutboundMessageLaneId::get(),
+				&messages,
+			);
 			assert_eq!(
 				PendingMessages::<TestRuntime, ()>::get(MESSAGE_NONCE),
 				Some(test_swap_hash())
@@ -1061,7 +1105,10 @@ mod tests {
 			let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 1, false);
 			messages.note_dispatched_message(true);
 			messages.note_dispatched_message(false);
-			Pallet::<TestRuntime, ()>::on_messages_delivered(&OutboundMessageLaneId::get(), &messages);
+			Pallet::<TestRuntime, ()>::on_messages_delivered(
+				&OutboundMessageLaneId::get(),
+				&messages,
+			);
 			assert_eq!(PendingMessages::<TestRuntime, ()>::get(MESSAGE_NONCE), None);
 			assert_eq!(
 				PendingSwaps::<TestRuntime, ()>::get(test_swap_hash()),
diff --git a/bridges/modules/token-swap/src/mock.rs b/bridges/modules/token-swap/src/mock.rs
index ed5c1b7cee316191a972b3847fab94d9ccdd13af..16894d9850ba07dd9b3256e3090bcf0274338520 100644
--- a/bridges/modules/token-swap/src/mock.rs
+++ b/bridges/modules/token-swap/src/mock.rs
@@ -172,9 +172,7 @@ impl sp_runtime::traits::Convert<H256, AccountId> for TestAccountConverter {
 
 /// Run pallet test.
 pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
-	let mut t = frame_system::GenesisConfig::default()
-		.build_storage::<TestRuntime>()
-		.unwrap();
+	let mut t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 	pallet_balances::GenesisConfig::<TestRuntime> {
 		balances: vec![(THIS_CHAIN_ACCOUNT, THIS_CHAIN_ACCOUNT_BALANCE)],
 	}
diff --git a/bridges/primitives/chain-kusama/src/lib.rs b/bridges/primitives/chain-kusama/src/lib.rs
index f4c448e41c60fd295f309d64c52ae0ccab94e080..9a6eb66d22865be745cf65c15e85a7ebd2252667 100644
--- a/bridges/primitives/chain-kusama/src/lib.rs
+++ b/bridges/primitives/chain-kusama/src/lib.rs
@@ -21,7 +21,9 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
-use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial};
+use frame_support::weights::{
+	WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
 use sp_version::RuntimeVersion;
 
@@ -83,29 +85,36 @@ pub const SESSION_LENGTH: BlockNumber = time_units::HOURS;
 pub const WITH_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotMessages";
 
 /// Name of the DOT->KSM conversion rate stored in the Kusama runtime.
-pub const POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME: &str = "PolkadotToKusamaConversionRate";
+pub const POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME: &str =
+	"PolkadotToKusamaConversionRate";
 
 /// Name of the `KusamaFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_best_finalized";
 /// Name of the `KusamaFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_header";
 
-/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToKusamaOutboundLaneApi::message_details` runtime method.
 pub const TO_KUSAMA_MESSAGE_DETAILS_METHOD: &str = "ToKusamaOutboundLaneApi_message_details";
 /// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce";
+pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToKusamaOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_received_nonce";
+pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToKusamaOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromKusamaInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_received_nonce";
+pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromKusamaInboundLaneApi_latest_received_nonce";
 /// Name of the `FromKusamaInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromKusamaInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromKusamaInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str = "FromKusamaInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str =
+	"FromKusamaInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Kusama headers.
diff --git a/bridges/primitives/chain-millau/src/lib.rs b/bridges/primitives/chain-millau/src/lib.rs
index e43e4f4bf5bd9fe20ae40659d6ff8142efd92ab7..950c82d5224bb461b445944d1260b500f8777087 100644
--- a/bridges/primitives/chain-millau/src/lib.rs
+++ b/bridges/primitives/chain-millau/src/lib.rs
@@ -30,9 +30,8 @@ use frame_support::{
 };
 use frame_system::limits;
 use sp_core::Hasher as HasherT;
-use sp_runtime::traits::Convert;
 use sp_runtime::{
-	traits::{IdentifyAccount, Verify},
+	traits::{Convert, IdentifyAccount, Verify},
 	MultiSignature, MultiSigner, Perbill,
 };
 use sp_std::prelude::*;
@@ -77,29 +76,32 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024;
 /// Weight of single regular message delivery transaction on Millau chain.
 ///
 /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call
-/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
-/// The message must have dispatch weight set to zero. The result then must be rounded up to account
-/// possible future runtime upgrades.
+/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`
+/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be
+/// rounded up to account for possible future runtime upgrades.
 pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
 
 /// Increase of delivery transaction weight on Millau chain with every additional message byte.
 ///
-/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The
-/// result then must be rounded up to account possible future runtime upgrades.
+/// This value is a result of
+/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then
+/// must be rounded up to account for possible future runtime upgrades.
 pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
 
 /// Maximal weight of single message delivery confirmation transaction on Millau chain.
 ///
-/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation
-/// for the case when single message is confirmed. The result then must be rounded up to account possible future
-/// runtime upgrades.
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof`
+/// weight formula computation for the case when single message is confirmed. The result then must
+/// be rounded up to account for possible future runtime upgrades.
 pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
 
 /// Weight of pay-dispatch-fee operation for inbound messages at Millau chain.
 ///
-/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
-/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
-/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
 pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 /// The target length of a session (how often authorities change) on Millau measured in of number of
@@ -264,22 +266,28 @@ pub const WITH_RIALTO_TOKEN_SWAP_PALLET_NAME: &str = "BridgeRialtoTokenSwap";
 /// Name of the `MillauFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_finalized";
 
-/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToMillauOutboundLaneApi::message_details` runtime method.
 pub const TO_MILLAU_MESSAGE_DETAILS_METHOD: &str = "ToMillauOutboundLaneApi_message_details";
 /// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce";
+pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToMillauOutboundLaneApi_latest_received_nonce";
 /// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_generated_nonce";
+pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToMillauOutboundLaneApi_latest_generated_nonce";
 
 /// Name of the `FromMillauInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_received_nonce";
+pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromMillauInboundLaneApi_latest_received_nonce";
 /// Name of the `FromMillauInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromMillauInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromMillauInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = "FromMillauInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str =
+	"FromMillauInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Millau headers.
diff --git a/bridges/primitives/chain-polkadot/src/lib.rs b/bridges/primitives/chain-polkadot/src/lib.rs
index d32165e6b79e308154f0b961de44f0c27850d227..26bad1ea8656d1e441b18a6712ca99c55e8a3e97 100644
--- a/bridges/primitives/chain-polkadot/src/lib.rs
+++ b/bridges/primitives/chain-polkadot/src/lib.rs
@@ -21,7 +21,9 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
-use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial};
+use frame_support::weights::{
+	WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
 use sp_version::RuntimeVersion;
 
@@ -72,8 +74,8 @@ pub const TRANSACTION_BYTE_FEE: Balance = 10 * 10_000_000_000 / 100 / 1_000;
 /// Existential deposit on Polkadot.
 pub const EXISTENTIAL_DEPOSIT: Balance = 10_000_000_000;
 
-/// The target length of a session (how often authorities change) on Polkadot measured in of number of
-/// blocks.
+/// The target length of a session (how often authorities change) on Polkadot measured in number
+/// of blocks.
 ///
 /// Note that since this is a target sessions may change before/after this time depending on network
 /// conditions.
@@ -83,29 +85,36 @@ pub const SESSION_LENGTH: BlockNumber = 4 * time_units::HOURS;
 pub const WITH_KUSAMA_MESSAGES_PALLET_NAME: &str = "BridgeKusamaMessages";
 
 /// Name of the KSM->DOT conversion rate stored in the Polkadot runtime.
-pub const KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME: &str = "KusamaToPolkadotConversionRate";
+pub const KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME: &str =
+	"KusamaToPolkadotConversionRate";
 
 /// Name of the `PolkadotFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_best_finalized";
 /// Name of the `PolkadotFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_header";
 
-/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToPolkadotOutboundLaneApi::message_details` runtime method.
 pub const TO_POLKADOT_MESSAGE_DETAILS_METHOD: &str = "ToPolkadotOutboundLaneApi_message_details";
 /// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce";
+pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToPolkadotOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_received_nonce";
+pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToPolkadotOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromPolkadotInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_received_nonce";
+pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromPolkadotInboundLaneApi_latest_received_nonce";
 /// Name of the `FromPolkadotInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromPolkadotInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromPolkadotInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str = "FromPolkadotInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str =
+	"FromPolkadotInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Polkadot headers.
diff --git a/bridges/primitives/chain-rialto/src/lib.rs b/bridges/primitives/chain-rialto/src/lib.rs
index 6ab3431e459e65cb76098eeed2bb8bd0bdc71eff..1ded7651ccecad317f114593779574f9c8ada5f0 100644
--- a/bridges/primitives/chain-rialto/src/lib.rs
+++ b/bridges/primitives/chain-rialto/src/lib.rs
@@ -68,29 +68,32 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128;
 /// Weight of single regular message delivery transaction on Rialto chain.
 ///
 /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call
-/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
-/// The message must have dispatch weight set to zero. The result then must be rounded up to account
-/// possible future runtime upgrades.
+/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`
+/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be
+/// rounded up to account for possible future runtime upgrades.
 pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
 
 /// Increase of delivery transaction weight on Rialto chain with every additional message byte.
 ///
-/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The
-/// result then must be rounded up to account possible future runtime upgrades.
+/// This value is a result of
+/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then
+/// must be rounded up to account for possible future runtime upgrades.
 pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
 
 /// Maximal weight of single message delivery confirmation transaction on Rialto chain.
 ///
-/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation
-/// for the case when single message is confirmed. The result then must be rounded up to account possible future
-/// runtime upgrades.
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof`
+/// weight formula computation for the case when single message is confirmed. The result then must
+/// be rounded up to account for possible future runtime upgrades.
 pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
 
 /// Weight of pay-dispatch-fee operation for inbound messages at Rialto chain.
 ///
-/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
-/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
-/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
 pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 /// The target length of a session (how often authorities change) on Rialto measured in of number of
@@ -231,22 +234,28 @@ pub const WITH_MILLAU_MESSAGES_PALLET_NAME: &str = "BridgeMillauMessages";
 /// Name of the `RialtoFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_finalized";
 
-/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToRialtoOutboundLaneApi::message_details` runtime method.
 pub const TO_RIALTO_MESSAGE_DETAILS_METHOD: &str = "ToRialtoOutboundLaneApi_message_details";
 /// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce";
+pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToRialtoOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_received_nonce";
+pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToRialtoOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromRialtoInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_received_nonce";
+pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromRialtoInboundLaneApi_latest_received_nonce";
 /// Name of the `FromRialtoInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromRialtoInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromRialtoInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = "FromRialtoInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str =
+	"FromRialtoInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Rialto headers.
diff --git a/bridges/primitives/chain-rococo/src/lib.rs b/bridges/primitives/chain-rococo/src/lib.rs
index ce58e7ec9ab02d30220707ec366ccfdd6b025cf1..d6d97fdc5f4ef24577922f4f21575c3416f1652e 100644
--- a/bridges/primitives/chain-rococo/src/lib.rs
+++ b/bridges/primitives/chain-rococo/src/lib.rs
@@ -21,7 +21,9 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
-use frame_support::weights::{Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial};
+use frame_support::weights::{
+	Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
 use sp_version::RuntimeVersion;
 
@@ -30,8 +32,8 @@ pub use bp_polkadot_core::*;
 /// Rococo Chain
 pub type Rococo = PolkadotLike;
 
-/// The target length of a session (how often authorities change) on Westend measured in of number of
-/// blocks.
+/// The target length of a session (how often authorities change) on Westend measured in number
+/// of blocks.
 ///
 /// Note that since this is a target sessions may change before/after this time depending on network
 /// conditions.
@@ -80,28 +82,36 @@ pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_fi
 /// Name of the `RococoFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_header";
 
-/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToRococoOutboundLaneApi::message_details` runtime method.
 pub const TO_ROCOCO_MESSAGE_DETAILS_METHOD: &str = "ToRococoOutboundLaneApi_message_details";
 /// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce";
+pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToRococoOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_received_nonce";
+pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToRococoOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromRococoInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_received_nonce";
+pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromRococoInboundLaneApi_latest_received_nonce";
 /// Name of the `FromRococoInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromRococoInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromRococoInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromRococoInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str =
+	"FromRococoInboundLaneApi_unrewarded_relayers_state";
 
 /// Weight of pay-dispatch-fee operation for inbound messages at Rococo chain.
 ///
-/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
-/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
-/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
 pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 sp_api::decl_runtime_apis! {
diff --git a/bridges/primitives/chain-westend/src/lib.rs b/bridges/primitives/chain-westend/src/lib.rs
index 0f430774307a50096347fef9aa36f482a9608c1a..480a5e56dd4c87e4d9d7106d6ae29c214802c5e0 100644
--- a/bridges/primitives/chain-westend/src/lib.rs
+++ b/bridges/primitives/chain-westend/src/lib.rs
@@ -22,7 +22,9 @@
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
 use bp_runtime::Chain;
-use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial};
+use frame_support::weights::{
+	WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
 use sp_version::RuntimeVersion;
 
@@ -114,25 +116,31 @@ pub const BEST_FINALIZED_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_best_
 /// Name of the `WestendFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_header";
 
-/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToWestendOutboundLaneApi::message_details` runtime method.
 pub const TO_WESTEND_MESSAGE_DETAILS_METHOD: &str = "ToWestendOutboundLaneApi_message_details";
 /// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce";
+pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToWestendOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_received_nonce";
+pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToWestendOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromWestendInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_received_nonce";
+pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromWestendInboundLaneApi_latest_received_nonce";
 /// Name of the `FromWestendInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromWestendInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromWestendInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str = "FromWestendInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str =
+	"FromWestendInboundLaneApi_unrewarded_relayers_state";
 
-/// The target length of a session (how often authorities change) on Westend measured in of number of
-/// blocks.
+/// The target length of a session (how often authorities change) on Westend measured in number
+/// of blocks.
 ///
 /// Note that since this is a target sessions may change before/after this time depending on network
 /// conditions.
diff --git a/bridges/primitives/chain-wococo/src/lib.rs b/bridges/primitives/chain-wococo/src/lib.rs
index f962973d6c1c8f943a49f4f8793cafb008a0a597..fe2ce3a309a6bf6ab6ab16bf2390980472a8511b 100644
--- a/bridges/primitives/chain-wococo/src/lib.rs
+++ b/bridges/primitives/chain-wococo/src/lib.rs
@@ -45,22 +45,28 @@ pub const BEST_FINALIZED_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_best_fi
 /// Name of the `WococoFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_is_known_header";
 
-/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToWococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToWococoOutboundLaneApi::message_details` runtime method.
 pub const TO_WOCOCO_MESSAGE_DETAILS_METHOD: &str = "ToWococoOutboundLaneApi_message_details";
 /// Name of the `ToWococoOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_generated_nonce";
+pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToWococoOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToWococoOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_received_nonce";
+pub const TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToWococoOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromWococoInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWococoInboundLaneApi_latest_received_nonce";
+pub const FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromWococoInboundLaneApi_latest_received_nonce";
 /// Name of the `FromWococoInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWococoInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromWococoInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromWococoInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_WOCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromWococoInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_WOCOCO_UNREWARDED_RELAYERS_STATE: &str =
+	"FromWococoInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Wococo headers.
diff --git a/bridges/primitives/currency-exchange/src/lib.rs b/bridges/primitives/currency-exchange/src/lib.rs
index c85a9d4ff7d2b2f200594f39bdadbac7169efce0..a3bdbbb2f42fb13ff43d220e1e13eb2013e30e08 100644
--- a/bridges/primitives/currency-exchange/src/lib.rs
+++ b/bridges/primitives/currency-exchange/src/lib.rs
@@ -71,7 +71,9 @@ pub trait MaybeLockFundsTransaction {
 
 	/// Parse lock funds transaction of the peer blockchain. Returns None if
 	/// transaction format is unknown, or it isn't a lock funds transaction.
-	fn parse(tx: &Self::Transaction) -> Result<LockFundsTransaction<Self::Id, Self::Recipient, Self::Amount>>;
+	fn parse(
+		tx: &Self::Transaction,
+	) -> Result<LockFundsTransaction<Self::Id, Self::Recipient, Self::Amount>>;
 }
 
 /// Map that maps recipients from peer blockchain to this blockchain recipients.
diff --git a/bridges/primitives/ethereum-poa/src/lib.rs b/bridges/primitives/ethereum-poa/src/lib.rs
index 657038a25f33bea504c6e3886640e2ddbd1a04c0..c63cf2b8f65c9558c63141b8aa99adf0de10352a 100644
--- a/bridges/primitives/ethereum-poa/src/lib.rs
+++ b/bridges/primitives/ethereum-poa/src/lib.rs
@@ -185,10 +185,7 @@ pub struct SealedEmptyStep {
 impl AuraHeader {
 	/// Compute id of this header.
 	pub fn compute_id(&self) -> HeaderId {
-		HeaderId {
-			number: self.number,
-			hash: self.compute_hash(),
-		}
+		HeaderId { number: self.number, hash: self.compute_hash() }
 	}
 
 	/// Compute hash of this header (keccak of the RLP with seal).
@@ -198,10 +195,9 @@ impl AuraHeader {
 
 	/// Get id of this header' parent. Returns None if this is genesis header.
 	pub fn parent_id(&self) -> Option<HeaderId> {
-		self.number.checked_sub(1).map(|parent_number| HeaderId {
-			number: parent_number,
-			hash: self.parent_hash,
-		})
+		self.number
+			.checked_sub(1)
+			.map(|parent_number| HeaderId { number: parent_number, hash: self.parent_hash })
 	}
 
 	/// Check if passed transactions receipts are matching receipts root in this header.
@@ -238,7 +234,7 @@ impl AuraHeader {
 				let mut message = self.compute_hash().as_bytes().to_vec();
 				message.extend_from_slice(self.seal.get(2)?);
 				keccak_256(&message).into()
-			}
+			},
 			false => keccak_256(&self.rlp(false)).into(),
 		})
 	}
@@ -255,9 +251,7 @@ impl AuraHeader {
 
 	/// Extracts the empty steps from the header seal.
 	pub fn empty_steps(&self) -> Option<Vec<SealedEmptyStep>> {
-		self.seal
-			.get(2)
-			.and_then(|x| Rlp::new(x).as_list::<SealedEmptyStep>().ok())
+		self.seal.get(2).and_then(|x| Rlp::new(x).as_list::<SealedEmptyStep>().ok())
 	}
 
 	/// Returns header RLP with or without seals.
@@ -368,15 +362,15 @@ impl Receipt {
 		match self.outcome {
 			TransactionOutcome::Unknown => {
 				s.begin_list(3);
-			}
+			},
 			TransactionOutcome::StateRoot(ref root) => {
 				s.begin_list(4);
 				s.append(root);
-			}
+			},
 			TransactionOutcome::StatusCode(ref status_code) => {
 				s.begin_list(4);
 				s.append(status_code);
-			}
+			},
 		}
 		s.append(&self.gas_used);
 		s.append(&EthBloom::from(self.log_bloom.0));
@@ -428,13 +422,13 @@ impl Decodable for SealedEmptyStep {
 impl LogEntry {
 	/// Calculates the bloom of this log entry.
 	pub fn bloom(&self) -> Bloom {
-		let eth_bloom =
-			self.topics
-				.iter()
-				.fold(EthBloom::from(BloomInput::Raw(self.address.as_bytes())), |mut b, t| {
-					b.accrue(BloomInput::Raw(t.as_bytes()));
-					b
-				});
+		let eth_bloom = self.topics.iter().fold(
+			EthBloom::from(BloomInput::Raw(self.address.as_bytes())),
+			|mut b, t| {
+				b.accrue(BloomInput::Raw(t.as_bytes()));
+				b
+			},
+		);
 		Bloom(*eth_bloom.data())
 	}
 }
@@ -498,14 +492,12 @@ pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result<Transaction, DecoderError
 	let message = unsigned.message(chain_id);
 
 	// recover tx sender
-	let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes())
-		.map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?;
+	let sender_public =
+		sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes())
+			.map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?;
 	let sender_address = public_to_address(&sender_public);
 
-	Ok(Transaction {
-		sender: sender_address,
-		unsigned,
-	})
+	Ok(Transaction { sender: sender_address, unsigned })
 }
 
 /// Convert public key into corresponding ethereum address.
@@ -519,7 +511,10 @@ pub fn public_to_address(public: &[u8; 64]) -> Address {
 /// Check ethereum merkle proof.
 /// Returns Ok(computed-root) if check succeeds.
 /// Returns Err(computed-root) if check fails.
-fn check_merkle_proof<T: AsRef<[u8]>>(expected_root: H256, items: impl Iterator<Item = T>) -> Result<H256, H256> {
+fn check_merkle_proof<T: AsRef<[u8]>>(
+	expected_root: H256,
+	items: impl Iterator<Item = T>,
+) -> Result<H256, H256> {
 	let computed_root = compute_merkle_root(items);
 	if computed_root == expected_root {
 		Ok(computed_root)
diff --git a/bridges/primitives/ethereum-poa/src/signatures.rs b/bridges/primitives/ethereum-poa/src/signatures.rs
index a4e076f2200c6217b6148a212d16d4820465f171..aaa5a980b2e2057206293b782efe4fa83953067e 100644
--- a/bridges/primitives/ethereum-poa/src/signatures.rs
+++ b/bridges/primitives/ethereum-poa/src/signatures.rs
@@ -23,8 +23,8 @@
 pub use secp256k1::SecretKey;
 
 use crate::{
-	public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction, UnsignedTransaction, H256,
-	H520, U256,
+	public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction,
+	UnsignedTransaction, H256, H520, U256,
 };
 
 use secp256k1::{Message, PublicKey};
@@ -80,7 +80,8 @@ impl SignTransaction for UnsignedTransaction {
 
 /// Return author's signature over given message.
 pub fn sign(author: &SecretKey, message: H256) -> H520 {
-	let (signature, recovery_id) = secp256k1::sign(&Message::parse(message.as_fixed_bytes()), author);
+	let (signature, recovery_id) =
+		secp256k1::sign(&Message::parse(message.as_fixed_bytes()), author);
 	let mut raw_signature = [0u8; 65];
 	raw_signature[..64].copy_from_slice(&signature.serialize());
 	raw_signature[64] = recovery_id.serialize();
@@ -116,10 +117,7 @@ mod tests {
 		let raw_tx = unsigned.clone().sign_by(&signer, Some(42));
 		assert_eq!(
 			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction {
-				sender: signer_address,
-				unsigned,
-			}),
+			Ok(Transaction { sender: signer_address, unsigned }),
 		);
 
 		// case2: without chain_id replay protection + contract creation
@@ -134,10 +132,7 @@ mod tests {
 		let raw_tx = unsigned.clone().sign_by(&signer, None);
 		assert_eq!(
 			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction {
-				sender: signer_address,
-				unsigned,
-			}),
+			Ok(Transaction { sender: signer_address, unsigned }),
 		);
 	}
 }
diff --git a/bridges/primitives/header-chain/src/justification.rs b/bridges/primitives/header-chain/src/justification.rs
index 760369fe0d87072cf15425a0f90a86495f585400..2d092496e8d5407551b77c7ec568860477016b54 100644
--- a/bridges/primitives/header-chain/src/justification.rs
+++ b/bridges/primitives/header-chain/src/justification.rs
@@ -24,8 +24,10 @@ use finality_grandpa::voter_set::VoterSet;
 use frame_support::RuntimeDebug;
 use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId};
 use sp_runtime::traits::Header as HeaderT;
-use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
-use sp_std::prelude::*;
+use sp_std::{
+	collections::{btree_map::BTreeMap, btree_set::BTreeSet},
+	prelude::*,
+};
 
 /// A GRANDPA Justification is a proof that a given header was finalized
 /// at a certain height and with a certain set of authorities.
@@ -37,7 +39,8 @@ pub struct GrandpaJustification<Header: HeaderT> {
 	/// The round (voting period) this justification is valid for.
 	pub round: u64,
 	/// The set of votes for the chain which is to be finalized.
-	pub commit: finality_grandpa::Commit<Header::Hash, Header::Number, AuthoritySignature, AuthorityId>,
+	pub commit:
+		finality_grandpa::Commit<Header::Hash, Header::Number, AuthoritySignature, AuthorityId>,
 	/// A proof that the chain of blocks in the commit are related to each other.
 	pub votes_ancestries: Vec<Header>,
 }
@@ -57,7 +60,8 @@ pub enum Error {
 	InvalidJustificationTarget,
 	/// The authority has provided an invalid signature.
 	InvalidAuthoritySignature,
-	/// The justification contains precommit for header that is not a descendant of the commit header.
+	/// The justification contains precommit for header that is not a descendant of the commit
+	/// header.
 	PrecommitIsNotCommitDescendant,
 	/// The cumulative weight of all votes in the justification is not enough to justify commit
 	/// header finalization.
@@ -87,7 +91,7 @@ where
 {
 	// ensure that it is justification for the expected header
 	if (justification.commit.target_hash, justification.commit.target_number) != finalized_target {
-		return Err(Error::InvalidJustificationTarget);
+		return Err(Error::InvalidJustificationTarget)
 	}
 
 	let mut chain = AncestryChain::new(&justification.votes_ancestries);
@@ -99,30 +103,32 @@ where
 		let authority_info = match authorities_set.get(&signed.id) {
 			Some(authority_info) => authority_info,
 			None => {
-				// just ignore precommit from unknown authority as `finality_grandpa::import_precommit` does
-				continue;
-			}
+				// just ignore precommit from unknown authority as
+				// `finality_grandpa::import_precommit` does
+				continue
+			},
 		};
 
 		// check if authority has already voted in the same round.
 		//
 		// there's a lot of code in `validate_commit` and `import_precommit` functions inside
-		// `finality-grandpa` crate (mostly related to reporing equivocations). But the only thing that we
-		// care about is that only first vote from the authority is accepted
+		// `finality-grandpa` crate (mostly related to reporting equivocations). But the only thing
+		// that we care about is that only first vote from the authority is accepted
 		if !votes.insert(signed.id.clone()) {
-			continue;
+			continue
 		}
 
 		// everything below this line can't just `continue`, because state is already altered
 
 		// all precommits must be for block higher than the target
 		if signed.precommit.target_number < justification.commit.target_number {
-			return Err(Error::PrecommitIsNotCommitDescendant);
+			return Err(Error::PrecommitIsNotCommitDescendant)
 		}
 		// all precommits must be for target block descendents
-		chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?;
-		// since we know now that the precommit target is the descendant of the justification target,
-		// we may increase 'weight' of the justification target
+		chain = chain
+			.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?;
+		// since we know now that the precommit target is the descendant of the justification
+		// target, we may increase 'weight' of the justification target
 		//
 		// there's a lot of code in the `VoteGraph::insert` method inside `finality-grandpa` crate,
 		// but in the end it is only used to find GHOST, which we don't care about. The only thing
@@ -144,13 +150,13 @@ where
 			authorities_set_id,
 			&mut signature_buffer,
 		) {
-			return Err(Error::InvalidAuthoritySignature);
+			return Err(Error::InvalidAuthoritySignature)
 		}
 	}
 
 	// check that there are no extra headers in the justification
 	if !chain.unvisited.is_empty() {
-		return Err(Error::ExtraHeadersInVotesAncestries);
+		return Err(Error::ExtraHeadersInVotesAncestries)
 	}
 
 	// check that the cumulative weight of validators voted for the justification target (or one
@@ -186,7 +192,8 @@ impl<Header: HeaderT> AncestryChain<Header> {
 		AncestryChain { parents, unvisited }
 	}
 
-	/// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and `Ok(_)` otherwise.
+	/// Returns `Ok(_)` if `precommit_target` is a descendant of the `commit_target` block and
+	/// `Err(_)` otherwise.
 	pub fn ensure_descendant(
 		mut self,
 		commit_target: &Header::Hash,
@@ -195,22 +202,22 @@ impl<Header: HeaderT> AncestryChain<Header> {
 		let mut current_hash = *precommit_target;
 		loop {
 			if current_hash == *commit_target {
-				break;
+				break
 			}
 
 			let is_visited_before = !self.unvisited.remove(&current_hash);
 			current_hash = match self.parents.get(&current_hash) {
 				Some(parent_hash) => {
 					if is_visited_before {
-						// `Some(parent_hash)` means that the `current_hash` is in the `parents` container
-						// `is_visited_before` means that it has been visited before in some of previous calls
-						// => since we assume that previous call has finished with `true`, this also will
-						//    be finished with `true`
-						return Ok(self);
+						// `Some(parent_hash)` means that the `current_hash` is in the `parents`
+						// container `is_visited_before` means that it has been visited before in
+						// some of previous calls => since we assume that previous call has finished
+						// with `true`, this also will be finished with `true`
+						return Ok(self)
 					}
 
 					*parent_hash
-				}
+				},
 				None => return Err(Error::PrecommitIsNotCommitDescendant),
 			};
 		}
diff --git a/bridges/primitives/header-chain/src/lib.rs b/bridges/primitives/header-chain/src/lib.rs
index f1eaa28d071f0752ec8e512a8c243d11461b3697..66dd42e14829889491008dc71afc5538e50873a4 100644
--- a/bridges/primitives/header-chain/src/lib.rs
+++ b/bridges/primitives/header-chain/src/lib.rs
@@ -20,15 +20,11 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use codec::{Codec, Decode, Encode, EncodeLike};
-use core::clone::Clone;
-use core::cmp::Eq;
-use core::default::Default;
-use core::fmt::Debug;
+use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
 use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
-use sp_runtime::RuntimeDebug;
-use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT};
+use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT, RuntimeDebug};
 use sp_std::boxed::Box;
 
 pub mod justification;
@@ -82,7 +78,9 @@ pub trait InclusionProofVerifier {
 	/// Verify that transaction is a part of given block.
 	///
 	/// Returns Some(transaction) if proof is valid and None otherwise.
-	fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<Self::Transaction>;
+	fn verify_transaction_inclusion_proof(
+		proof: &Self::TransactionInclusionProof,
+	) -> Option<Self::Transaction>;
 }
 
 /// A trait for pallets which want to keep track of finalized headers from a bridged chain.
diff --git a/bridges/primitives/header-chain/tests/implementation_match.rs b/bridges/primitives/header-chain/tests/implementation_match.rs
index 0b55c19035287bd72932d66c8c8b6e5b6ba416cd..51275bbd645e50d45759df82672b1002eea161a2 100644
--- a/bridges/primitives/header-chain/tests/implementation_match.rs
+++ b/bridges/primitives/header-chain/tests/implementation_match.rs
@@ -23,8 +23,8 @@
 use assert_matches::assert_matches;
 use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification};
 use bp_test_utils::{
-	header_id, make_justification_for_header, signed_precommit, test_header, Account, JustificationGeneratorParams,
-	ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID,
+	header_id, make_justification_for_header, signed_precommit, test_header, Account,
+	JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID,
 };
 use finality_grandpa::voter_set::VoterSet;
 use sp_finality_grandpa::{AuthorityId, AuthorityWeight};
@@ -44,18 +44,22 @@ impl AncestryChain {
 }
 
 impl finality_grandpa::Chain<TestHash, TestNumber> for AncestryChain {
-	fn ancestry(&self, base: TestHash, block: TestHash) -> Result<Vec<TestHash>, finality_grandpa::Error> {
+	fn ancestry(
+		&self,
+		base: TestHash,
+		block: TestHash,
+	) -> Result<Vec<TestHash>, finality_grandpa::Error> {
 		let mut route = Vec::new();
 		let mut current_hash = block;
 		loop {
 			if current_hash == base {
-				break;
+				break
 			}
 			match self.0.parents.get(&current_hash).cloned() {
 				Some(parent_hash) => {
 					current_hash = parent_hash;
 					route.push(current_hash);
-				}
+				},
 				_ => return Err(finality_grandpa::Error::NotDescendent),
 			}
 		}
@@ -81,14 +85,11 @@ fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> {
 	vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]
 }
 
-/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a header finality.
+/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a
+/// header finality.
 pub fn minimal_voter_set() -> VoterSet<AuthorityId> {
-	VoterSet::new(
-		minimal_accounts_set()
-			.iter()
-			.map(|(id, w)| (AuthorityId::from(*id), *w)),
-	)
-	.unwrap()
+	VoterSet::new(minimal_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w)))
+		.unwrap()
 }
 
 /// Make a valid GRANDPA justification with sensible defaults.
@@ -174,14 +175,8 @@ fn same_result_when_justification_contains_duplicate_vote() {
 	let mut justification = make_default_justification(&test_header(1));
 	// the justification may contain exactly the same vote (i.e. same precommit and same signature)
 	// multiple times && it isn't treated as an error by original implementation
-	justification
-		.commit
-		.precommits
-		.push(justification.commit.precommits[0].clone());
-	justification
-		.commit
-		.precommits
-		.push(justification.commit.precommits[0].clone());
+	justification.commit.precommits.push(justification.commit.precommits[0].clone());
+	justification.commit.precommits.push(justification.commit.precommits[0].clone());
 
 	// our implementation succeeds
 	assert_eq!(
diff --git a/bridges/primitives/message-dispatch/src/lib.rs b/bridges/primitives/message-dispatch/src/lib.rs
index 794091c789194104dfef86e3082a863c091f11f1..5f39197b1175180bdae8dab1476557ea15021fad 100644
--- a/bridges/primitives/message-dispatch/src/lib.rs
+++ b/bridges/primitives/message-dispatch/src/lib.rs
@@ -112,7 +112,12 @@ pub enum CallOrigin<SourceChainAccountId, TargetChainAccountPublic, TargetChainS
 
 /// Message payload type used by dispatch module.
 #[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq)]
-pub struct MessagePayload<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature, Call> {
+pub struct MessagePayload<
+	SourceChainAccountId,
+	TargetChainAccountPublic,
+	TargetChainSignature,
+	Call,
+> {
 	/// Runtime specification version. We only dispatch messages that have the same
 	/// runtime version. Otherwise we risk to misinterpret encoded calls.
 	pub spec_version: SpecVersion,
diff --git a/bridges/primitives/messages/src/lib.rs b/bridges/primitives/messages/src/lib.rs
index f639cea933a0bb2a1f974e3bba0e6155bd92101b..3cf07f6be9a02e7fe3c79fa2be6189b85b8dd2f4 100644
--- a/bridges/primitives/messages/src/lib.rs
+++ b/bridges/primitives/messages/src/lib.rs
@@ -110,22 +110,23 @@ pub struct Message<Fee> {
 /// Inbound lane data.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
 pub struct InboundLaneData<RelayerId> {
-	/// Identifiers of relayers and messages that they have delivered to this lane (ordered by message nonce).
+	/// Identifiers of relayers and messages that they have delivered to this lane (ordered by
+	/// message nonce).
 	///
 	/// This serves as a helper storage item, to allow the source chain to easily pay rewards
 	/// to the relayers who successfully delivered messages to the target chain (inbound lane).
 	///
 	/// It is guaranteed to have at most N entries, where N is configured at the module level.
 	/// If there are N entries in this vec, then:
-	/// 1) all incoming messages are rejected if they're missing corresponding `proof-of(outbound-lane.state)`;
-	/// 2) all incoming messages are rejected if `proof-of(outbound-lane.state).last_delivered_nonce` is
-	///    equal to `self.last_confirmed_nonce`.
-	/// Given what is said above, all nonces in this queue are in range:
-	/// `(self.last_confirmed_nonce; self.last_delivered_nonce()]`.
+	/// 1) all incoming messages are rejected if they're missing corresponding
+	/// `proof-of(outbound-lane.state)`; 2) all incoming messages are rejected if
+	/// `proof-of(outbound-lane.state).last_delivered_nonce` is equal to
+	/// `self.last_confirmed_nonce`. Given what is said above, all nonces in this queue are in
+	/// range: `(self.last_confirmed_nonce; self.last_delivered_nonce()]`.
 	///
 	/// When a relayer sends a single message, both of MessageNonces are the same.
-	/// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce.
-	/// Multiple dispatches from the same relayer are allowed.
+	/// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the
+	/// highest nonce. Multiple dispatches from the same relayer are allowed.
 	pub relayers: VecDeque<UnrewardedRelayer<RelayerId>>,
 
 	/// Nonce of the last message that
@@ -141,10 +142,7 @@ pub struct InboundLaneData<RelayerId> {
 
 impl<RelayerId> Default for InboundLaneData<RelayerId> {
 	fn default() -> Self {
-		InboundLaneData {
-			relayers: VecDeque::new(),
-			last_confirmed_nonce: 0,
-		}
+		InboundLaneData { relayers: VecDeque::new(), last_confirmed_nonce: 0 }
 	}
 }
 
@@ -153,12 +151,17 @@ impl<RelayerId> InboundLaneData<RelayerId> {
 	/// size of each entry.
 	///
 	/// Returns `None` if size overflows `u32` limits.
-	pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32, messages_count: u32) -> Option<u32> {
+	pub fn encoded_size_hint(
+		relayer_id_encoded_size: u32,
+		relayers_entries: u32,
+		messages_count: u32,
+	) -> Option<u32> {
 		let message_nonce_size = 8;
 		let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?;
 		let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?;
 		let dispatch_results_per_byte = 8;
-		let dispatch_result_size = sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte);
+		let dispatch_result_size =
+			sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte);
 		relayers_size
 			.checked_add(message_nonce_size)
 			.and_then(|result| result.checked_add(dispatch_result_size))
@@ -193,8 +196,8 @@ pub type DispatchResultsBitVec = BitVec<Msb0, u8>;
 
 /// Unrewarded relayer entry stored in the inbound lane data.
 ///
-/// This struct represents a continuous range of messages that have been delivered by the same relayer
-/// and whose confirmations are still pending.
+/// This struct represents a continuous range of messages that have been delivered by the same
+/// relayer and whose confirmations are still pending.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
 pub struct UnrewardedRelayer<RelayerId> {
 	/// Identifier of the relayer.
@@ -217,7 +220,8 @@ pub struct DeliveredMessages {
 }
 
 impl DeliveredMessages {
-	/// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given dispatch result.
+	/// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given
+	/// dispatch result.
 	pub fn new(nonce: MessageNonce, dispatch_result: bool) -> Self {
 		DeliveredMessages {
 			begin: nonce,
@@ -277,8 +281,8 @@ pub struct UnrewardedRelayersState {
 /// Outbound lane data.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
 pub struct OutboundLaneData {
-	/// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated message if
-	/// all sent messages are already pruned.
+	/// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated
+	/// message if all sent messages are already pruned.
 	pub oldest_unpruned_nonce: MessageNonce,
 	/// Nonce of the latest message, received by bridged chain.
 	pub latest_received_nonce: MessageNonce,
@@ -289,7 +293,8 @@ pub struct OutboundLaneData {
 impl Default for OutboundLaneData {
 	fn default() -> Self {
 		OutboundLaneData {
-			// it is 1 because we're pruning everything in [oldest_unpruned_nonce; latest_received_nonce]
+			// it is 1 because we're pruning everything in [oldest_unpruned_nonce;
+			// latest_received_nonce]
 			oldest_unpruned_nonce: 1,
 			latest_received_nonce: 0,
 			latest_generated_nonce: 0,
@@ -300,7 +305,9 @@ impl Default for OutboundLaneData {
 /// Returns total number of messages in the `InboundLaneData::relayers` vector.
 ///
 /// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`).
-pub fn total_unrewarded_messages<RelayerId>(relayers: &VecDeque<UnrewardedRelayer<RelayerId>>) -> Option<MessageNonce> {
+pub fn total_unrewarded_messages<RelayerId>(
+	relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
+) -> Option<MessageNonce> {
 	match (relayers.front(), relayers.back()) {
 		(Some(front), Some(back)) => {
 			if let Some(difference) = back.messages.end.checked_sub(front.messages.begin) {
@@ -308,7 +315,7 @@ pub fn total_unrewarded_messages<RelayerId>(relayers: &VecDeque<UnrewardedRelaye
 			} else {
 				Some(0)
 			}
-		}
+		},
 		_ => Some(0),
 	}
 }
@@ -322,10 +329,7 @@ mod tests {
 		assert_eq!(
 			total_unrewarded_messages(
 				&vec![
-					UnrewardedRelayer {
-						relayer: 1,
-						messages: DeliveredMessages::new(0, true)
-					},
+					UnrewardedRelayer { relayer: 1, messages: DeliveredMessages::new(0, true) },
 					UnrewardedRelayer {
 						relayer: 2,
 						messages: DeliveredMessages::new(MessageNonce::MAX, true)
@@ -349,7 +353,11 @@ mod tests {
 			(13u8, 128u8),
 		];
 		for (relayer_entries, messages_count) in test_cases {
-			let expected_size = InboundLaneData::<u8>::encoded_size_hint(1, relayer_entries as _, messages_count as _);
+			let expected_size = InboundLaneData::<u8>::encoded_size_hint(
+				1,
+				relayer_entries as _,
+				messages_count as _,
+			);
 			let actual_size = InboundLaneData {
 				relayers: (1u8..=relayer_entries)
 					.map(|i| {
@@ -383,11 +391,8 @@ mod tests {
 
 	#[test]
 	fn message_dispatch_result_works() {
-		let delivered_messages = DeliveredMessages {
-			begin: 100,
-			end: 150,
-			dispatch_results: bitvec![Msb0, u8; 1; 151],
-		};
+		let delivered_messages =
+			DeliveredMessages { begin: 100, end: 150, dispatch_results: bitvec![Msb0, u8; 1; 151] };
 
 		assert!(!delivered_messages.contains_message(99));
 		assert!(delivered_messages.contains_message(100));
diff --git a/bridges/primitives/messages/src/source_chain.rs b/bridges/primitives/messages/src/source_chain.rs
index 326ab4f0f8ccbef78f1013ddccfe07d1cb5c53f3..dbe6a5922158dc09a55f56fc94c7d39a082c6ae0 100644
--- a/bridges/primitives/messages/src/source_chain.rs
+++ b/bridges/primitives/messages/src/source_chain.rs
@@ -81,7 +81,8 @@ pub trait LaneMessageVerifier<Submitter, Payload, Fee> {
 	/// Error type.
 	type Error: Debug + Into<&'static str>;
 
-	/// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the lane.
+	/// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the
+	/// lane.
 	fn verify_message(
 		submitter: &Sender<Submitter>,
 		delivery_and_dispatch_fee: &Fee,
@@ -190,7 +191,8 @@ impl OnMessageAccepted for () {
 pub struct ForbidOutboundMessages;
 
 /// Error message that is used in `ForbidOutboundMessages` implementation.
-const ALL_OUTBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all outbound messages";
+const ALL_OUTBOUND_MESSAGES_REJECTED: &str =
+	"This chain is configured to reject all outbound messages";
 
 impl<Payload, AccountId> TargetHeaderChain<Payload, AccountId> for ForbidOutboundMessages {
 	type Error = &'static str;
@@ -208,7 +210,9 @@ impl<Payload, AccountId> TargetHeaderChain<Payload, AccountId> for ForbidOutboun
 	}
 }
 
-impl<Submitter, Payload, Fee> LaneMessageVerifier<Submitter, Payload, Fee> for ForbidOutboundMessages {
+impl<Submitter, Payload, Fee> LaneMessageVerifier<Submitter, Payload, Fee>
+	for ForbidOutboundMessages
+{
 	type Error = &'static str;
 
 	fn verify_message(
@@ -222,7 +226,9 @@ impl<Submitter, Payload, Fee> LaneMessageVerifier<Submitter, Payload, Fee> for F
 	}
 }
 
-impl<AccountId, Balance> MessageDeliveryAndDispatchPayment<AccountId, Balance> for ForbidOutboundMessages {
+impl<AccountId, Balance> MessageDeliveryAndDispatchPayment<AccountId, Balance>
+	for ForbidOutboundMessages
+{
 	type Error = &'static str;
 
 	fn pay_delivery_and_dispatch_fee(
diff --git a/bridges/primitives/messages/src/target_chain.rs b/bridges/primitives/messages/src/target_chain.rs
index 38ce9e800da43f9bc20f5f3408f99b192d97d568..e3f458ac8fce5815c38d3851dd4d3ca0e4ab8e0b 100644
--- a/bridges/primitives/messages/src/target_chain.rs
+++ b/bridges/primitives/messages/src/target_chain.rs
@@ -111,23 +111,19 @@ pub trait MessageDispatch<AccountId, Fee> {
 
 impl<Message> Default for ProvedLaneMessages<Message> {
 	fn default() -> Self {
-		ProvedLaneMessages {
-			lane_state: None,
-			messages: Vec::new(),
-		}
+		ProvedLaneMessages { lane_state: None, messages: Vec::new() }
 	}
 }
 
 impl<DispatchPayload: Decode, Fee> From<Message<Fee>> for DispatchMessage<DispatchPayload, Fee> {
 	fn from(message: Message<Fee>) -> Self {
-		DispatchMessage {
-			key: message.key,
-			data: message.data.into(),
-		}
+		DispatchMessage { key: message.key, data: message.data.into() }
 	}
 }
 
-impl<DispatchPayload: Decode, Fee> From<MessageData<Fee>> for DispatchMessageData<DispatchPayload, Fee> {
+impl<DispatchPayload: Decode, Fee> From<MessageData<Fee>>
+	for DispatchMessageData<DispatchPayload, Fee>
+{
 	fn from(data: MessageData<Fee>) -> Self {
 		DispatchMessageData {
 			payload: DispatchPayload::decode(&mut &data.payload[..]),
@@ -141,7 +137,8 @@ impl<DispatchPayload: Decode, Fee> From<MessageData<Fee>> for DispatchMessageDat
 pub struct ForbidInboundMessages;
 
 /// Error message that is used in `ForbidOutboundMessages` implementation.
-const ALL_INBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all inbound messages";
+const ALL_INBOUND_MESSAGES_REJECTED: &str =
+	"This chain is configured to reject all inbound messages";
 
 impl<Fee> SourceHeaderChain<Fee> for ForbidInboundMessages {
 	type Error = &'static str;
@@ -162,7 +159,10 @@ impl<AccountId, Fee> MessageDispatch<AccountId, Fee> for ForbidInboundMessages {
 		Weight::MAX
 	}
 
-	fn dispatch(_: &AccountId, _: DispatchMessage<Self::DispatchPayload, Fee>) -> MessageDispatchResult {
+	fn dispatch(
+		_: &AccountId,
+		_: DispatchMessage<Self::DispatchPayload, Fee>,
+	) -> MessageDispatchResult {
 		MessageDispatchResult {
 			dispatch_result: false,
 			unspent_weight: 0,
diff --git a/bridges/primitives/polkadot-core/src/lib.rs b/bridges/primitives/polkadot-core/src/lib.rs
index a36f7ae65240c96aa12c7b464e399c5807ad3f27..8f5fab60ab3ffaaf68770d49d32d77bf02be22c9 100644
--- a/bridges/primitives/polkadot-core/src/lib.rs
+++ b/bridges/primitives/polkadot-core/src/lib.rs
@@ -76,8 +76,9 @@ const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
 /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
 pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND;
 
-/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on average,
-/// hence a single extrinsic will not be allowed to consume more than `AvailableBlockRatio - 1 percent`.
+/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on
+/// average, hence a single extrinsic will not be allowed to consume more than
+/// `AvailableBlockRatio - 1 percent`.
 ///
 /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
 pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1);
@@ -113,7 +114,8 @@ parameter_types! {
 		.build_or_panic();
 }
 
-/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can use.
+/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can
+/// use.
 pub fn max_extrinsic_weight() -> Weight {
 	BlockWeights::get()
 		.get(DispatchClass::Normal)
@@ -144,18 +146,21 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192;
 
 /// Maximal weight of single message delivery confirmation transaction on Polkadot-like chain.
 ///
-/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula
-/// computation for the case when single message is confirmed. The result then must be rounded up to account possible
-/// future runtime upgrades.
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof`
+/// weight formula computation for the case when single message is confirmed. The result then must
+/// be rounded up to account possible future runtime upgrades.
 pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
 
-/// Increase of delivery transaction weight on Polkadot-like chain with every additional message byte.
+/// Increase of delivery transaction weight on Polkadot-like chain with every additional message
+/// byte.
 ///
-/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The
-/// result then must be rounded up to account possible future runtime upgrades.
+/// This value is a result of
+/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then
+/// must be rounded up to account possible future runtime upgrades.
 pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
 
-/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded call itself.
+/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded
+/// call itself.
 ///
 /// Can be computed by subtracting encoded call size from raw transaction size.
 pub const TX_EXTRA_BYTES: u32 = 256;
@@ -163,16 +168,18 @@ pub const TX_EXTRA_BYTES: u32 = 256;
 /// Weight of single regular message delivery transaction on Polkadot-like chain.
 ///
 /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call
-/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
-/// The message must have dispatch weight set to zero. The result then must be rounded up to account
-/// possible future runtime upgrades.
+/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`
+/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be
+/// rounded up to account possible future runtime upgrades.
 pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
 
 /// Weight of pay-dispatch-fee operation for inbound messages at Polkadot-like chain.
 ///
-/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
-/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
-/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
 pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 /// Re-export `time_units` to make usage easier.
@@ -240,15 +247,7 @@ pub type UncheckedExtrinsic<Call> =
 pub type Address = MultiAddress<AccountId, ()>;
 
 /// A type of the data encoded as part of the transaction.
-pub type SignedExtra = (
-	(),
-	(),
-	(),
-	sp_runtime::generic::Era,
-	Compact<Nonce>,
-	(),
-	Compact<Balance>,
-);
+pub type SignedExtra = ((), (), (), sp_runtime::generic::Era, Compact<Nonce>, (), Compact<Balance>);
 
 /// Parameters which are part of the payload used to produce transaction signature,
 /// but don't end up in the transaction itself (i.e. inherent part of the runtime).
@@ -270,7 +269,9 @@ impl<Call> parity_scale_codec::Encode for SignedExtensions<Call> {
 }
 
 impl<Call> parity_scale_codec::Decode for SignedExtensions<Call> {
-	fn decode<I: parity_scale_codec::Input>(_input: &mut I) -> Result<Self, parity_scale_codec::Error> {
+	fn decode<I: parity_scale_codec::Input>(
+		_input: &mut I,
+	) -> Result<Self, parity_scale_codec::Error> {
 		unimplemented!("SignedExtensions are never meant to be decoded, they are only used to create transaction");
 	}
 }
@@ -331,7 +332,9 @@ where
 	type AdditionalSigned = AdditionalSigned;
 	type Pre = ();
 
-	fn additional_signed(&self) -> Result<Self::AdditionalSigned, frame_support::unsigned::TransactionValidityError> {
+	fn additional_signed(
+		&self,
+	) -> Result<Self::AdditionalSigned, frame_support::unsigned::TransactionValidityError> {
 		Ok(self.additional_signed)
 	}
 }
@@ -372,7 +375,9 @@ pub fn account_info_storage_key(id: &AccountId) -> Vec<u8> {
 	let storage_prefix_hashed = Twox128::hash(b"Account");
 	let key_hashed = parity_scale_codec::Encode::using_encoded(id, Blake2_128Concat::hash);
 
-	let mut final_key = Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len());
+	let mut final_key = Vec::with_capacity(
+		module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(),
+	);
 
 	final_key.extend_from_slice(&module_prefix_hashed[..]);
 	final_key.extend_from_slice(&storage_prefix_hashed[..]);
@@ -400,8 +405,8 @@ mod tests {
 	#[test]
 	fn should_generate_storage_key() {
 		let acc = [
-			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
-			30, 31, 32,
+			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+			25, 26, 27, 28, 29, 30, 31, 32,
 		]
 		.into();
 		let key = account_info_storage_key(&acc);
diff --git a/bridges/primitives/runtime/src/chain.rs b/bridges/primitives/runtime/src/chain.rs
index 2d7325641bac5dd26ba5ff723fbf2420b32fcded..e24694bf8b0f86ea1c34f3f3a6c0e13eb0a61504 100644
--- a/bridges/primitives/runtime/src/chain.rs
+++ b/bridges/primitives/runtime/src/chain.rs
@@ -18,8 +18,8 @@ use frame_support::Parameter;
 use num_traits::{AsPrimitive, Bounded, CheckedSub, SaturatingAdd, Zero};
 use sp_runtime::{
 	traits::{
-		AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeMallocSizeOf,
-		MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify,
+		AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay,
+		MaybeMallocSizeOf, MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify,
 	},
 	FixedPointOperand,
 };
@@ -77,10 +77,18 @@ pub trait Chain: Send + Sync + 'static {
 	/// A type that fulfills the abstract idea of what a Substrate header is.
 	// See here for more info:
 	// https://crates.parity.io/sp_runtime/traits/trait.Header.html
-	type Header: Parameter + HeaderT<Number = Self::BlockNumber, Hash = Self::Hash> + MaybeSerializeDeserialize;
+	type Header: Parameter
+		+ HeaderT<Number = Self::BlockNumber, Hash = Self::Hash>
+		+ MaybeSerializeDeserialize;
 
 	/// The user account identifier type for the runtime.
-	type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default;
+	type AccountId: Parameter
+		+ Member
+		+ MaybeSerializeDeserialize
+		+ Debug
+		+ MaybeDisplay
+		+ Ord
+		+ Default;
 	/// Balance of an account in native tokens.
 	///
 	/// The chain may support multiple tokens, but this particular type is for token that is used
diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs
index aee894aceefcae87199addc325cfc2b7e21037d7..fe3309c83bf414e2eaca1ef3af86c9eda65a71b2 100644
--- a/bridges/primitives/runtime/src/lib.rs
+++ b/bridges/primitives/runtime/src/lib.rs
@@ -25,8 +25,8 @@ use sp_io::hashing::blake2_256;
 use sp_std::{convert::TryFrom, vec::Vec};
 
 pub use chain::{
-	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf, IndexOf, SignatureOf,
-	TransactionEraOf,
+	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf,
+	IndexOf, SignatureOf, TransactionEraOf,
 };
 pub use storage_proof::{Error as StorageProofError, StorageProofChecker};
 
@@ -72,8 +72,9 @@ pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-deriva
 ///
 /// In addition to its main function (identifying the chain), this type may also be used to
 /// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g.
-/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and Chain2.
-/// Sometimes we need to be able to identify deployed instance dynamically. This type may be used for that.
+/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and
+/// Chain2. Sometimes we need to be able to identify deployed instance dynamically. This type may be
+/// used for that.
 pub type ChainId = [u8; 4];
 
 /// Type of accounts on the source chain.
@@ -103,8 +104,10 @@ where
 	AccountId: Encode,
 {
 	match id {
-		SourceAccount::Root => (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256),
-		SourceAccount::Account(id) => (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256),
+		SourceAccount::Root =>
+			(ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256),
+		SourceAccount::Account(id) =>
+			(ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256),
 	}
 	.into()
 }
@@ -113,8 +116,8 @@ where
 ///
 /// This account is used to collect fees for relayers that are passing messages across the bridge.
 ///
-/// The account ID can be the same across different instances of `pallet-bridge-messages` if the same
-/// `bridge_id` is used.
+/// The account ID can be the same across different instances of `pallet-bridge-messages` if the
+/// same `bridge_id` is used.
 pub fn derive_relayer_fund_account_id(bridge_id: ChainId) -> H256 {
 	("relayer-fund-account", bridge_id).using_encoded(blake2_256).into()
 }
@@ -154,9 +157,15 @@ pub enum TransactionEra<BlockNumber, BlockHash> {
 
 impl<BlockNumber: Copy + Into<u64>, BlockHash: Copy> TransactionEra<BlockNumber, BlockHash> {
 	/// Prepare transaction era, based on mortality period and current best block number.
-	pub fn new(best_block_number: BlockNumber, best_block_hash: BlockHash, mortality_period: Option<u32>) -> Self {
+	pub fn new(
+		best_block_number: BlockNumber,
+		best_block_hash: BlockHash,
+		mortality_period: Option<u32>,
+	) -> Self {
 		mortality_period
-			.map(|mortality_period| TransactionEra::Mortal(best_block_number, best_block_hash, mortality_period))
+			.map(|mortality_period| {
+				TransactionEra::Mortal(best_block_number, best_block_hash, mortality_period)
+			})
 			.unwrap_or(TransactionEra::Immortal)
 	}
 
@@ -169,9 +178,8 @@ impl<BlockNumber: Copy + Into<u64>, BlockHash: Copy> TransactionEra<BlockNumber,
 	pub fn frame_era(&self) -> sp_runtime::generic::Era {
 		match *self {
 			TransactionEra::Immortal => sp_runtime::generic::Era::immortal(),
-			TransactionEra::Mortal(header_number, _, period) => {
-				sp_runtime::generic::Era::mortal(period as _, header_number.into())
-			}
+			TransactionEra::Mortal(header_number, _, period) =>
+				sp_runtime::generic::Era::mortal(period as _, header_number.into()),
 		}
 	}
 
@@ -184,25 +192,40 @@ impl<BlockNumber: Copy + Into<u64>, BlockHash: Copy> TransactionEra<BlockNumber,
 	}
 }
 
-/// This is a copypaste of the `frame_support::storage::generator::StorageMap::storage_map_final_key`
-/// for `Blake2_128Concat` maps.
+/// This is a copypaste of the
+/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for `Blake2_128Concat`
+/// maps.
 ///
 /// We're using it because to call `storage_map_final_key` directly, we need access to the runtime
 /// and pallet instance, which (sometimes) is impossible.
-pub fn storage_map_final_key_blake2_128concat(pallet_prefix: &str, map_name: &str, key: &[u8]) -> StorageKey {
-	storage_map_final_key_identity(pallet_prefix, map_name, &frame_support::Blake2_128Concat::hash(key))
+pub fn storage_map_final_key_blake2_128concat(
+	pallet_prefix: &str,
+	map_name: &str,
+	key: &[u8],
+) -> StorageKey {
+	storage_map_final_key_identity(
+		pallet_prefix,
+		map_name,
+		&frame_support::Blake2_128Concat::hash(key),
+	)
 }
 
-/// This is a copypaste of the `frame_support::storage::generator::StorageMap::storage_map_final_key`
-/// for `Identity` maps.
+/// This is a copypaste of the
+/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for `Identity` maps.
 ///
 /// We're using it because to call `storage_map_final_key` directly, we need access to the runtime
 /// and pallet instance, which (sometimes) is impossible.
-pub fn storage_map_final_key_identity(pallet_prefix: &str, map_name: &str, key_hashed: &[u8]) -> StorageKey {
+pub fn storage_map_final_key_identity(
+	pallet_prefix: &str,
+	map_name: &str,
+	key_hashed: &[u8],
+) -> StorageKey {
 	let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes());
 	let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes());
 
-	let mut final_key = Vec::with_capacity(pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len());
+	let mut final_key = Vec::with_capacity(
+		pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(),
+	);
 
 	final_key.extend_from_slice(&pallet_prefix_hashed[..]);
 	final_key.extend_from_slice(&storage_prefix_hashed[..]);
@@ -211,7 +234,8 @@ pub fn storage_map_final_key_identity(pallet_prefix: &str, map_name: &str, key_h
 	StorageKey(final_key)
 }
 
-/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; }`) is computed.
+/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false;
+/// }`) is computed.
 ///
 /// Copypaste from `frame_support::parameter_types` macro
 pub fn storage_parameter_key(parameter_name: &str) -> StorageKey {
diff --git a/bridges/primitives/runtime/src/messages.rs b/bridges/primitives/runtime/src/messages.rs
index 1b00c10fa4930945300c2605d072f0f4fe839ce4..c351d771cf8710c54cdfb33962ec91a8f8fbcefe 100644
--- a/bridges/primitives/runtime/src/messages.rs
+++ b/bridges/primitives/runtime/src/messages.rs
@@ -50,7 +50,7 @@ pub struct MessageDispatchResult {
 	/// 2) if message has not been dispatched at all.
 	pub unspent_weight: Weight,
 	/// Whether the message dispatch fee has been paid during dispatch. This will be true if your
-	/// configuration supports pay-dispatch-fee-at-target-chain option and message sender has enabled
-	/// this option.
+	/// configuration supports pay-dispatch-fee-at-target-chain option and message sender has
+	/// enabled this option.
 	pub dispatch_fee_paid_during_dispatch: bool,
 }
diff --git a/bridges/primitives/runtime/src/storage_proof.rs b/bridges/primitives/runtime/src/storage_proof.rs
index d70be93b1d2513648011351be1bcfd854095325b..9cc5b48ebd913319e4be1f29be5ea8dbcb268e60 100644
--- a/bridges/primitives/runtime/src/storage_proof.rs
+++ b/bridges/primitives/runtime/src/storage_proof.rs
@@ -42,7 +42,7 @@ where
 	pub fn new(root: H::Out, proof: StorageProof) -> Result<Self, Error> {
 		let db = proof.into_memory_db();
 		if !db.contains(&root, EMPTY_PREFIX) {
-			return Err(Error::StorageRootMismatch);
+			return Err(Error::StorageRootMismatch)
 		}
 
 		let checker = StorageProofChecker { root, db };
@@ -52,7 +52,8 @@ where
 	/// Reads a value from the available subset of storage. If the value cannot be read due to an
 	/// incomplete or otherwise invalid proof, this returns an error.
 	pub fn read_value(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
-		read_trie_value::<Layout<H>, _>(&self.db, &self.root, key).map_err(|_| Error::StorageValueUnavailable)
+		read_trie_value::<Layout<H>, _>(&self.db, &self.root, key)
+			.map_err(|_| Error::StorageValueUnavailable)
 	}
 }
 
@@ -97,7 +98,8 @@ pub mod tests {
 		let (root, proof) = craft_valid_storage_proof();
 
 		// check proof in runtime
-		let checker = <StorageProofChecker<sp_core::Blake2Hasher>>::new(root, proof.clone()).unwrap();
+		let checker =
+			<StorageProofChecker<sp_core::Blake2Hasher>>::new(root, proof.clone()).unwrap();
 		assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec())));
 		assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec())));
 		assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable));
diff --git a/bridges/primitives/test-utils/src/keyring.rs b/bridges/primitives/test-utils/src/keyring.rs
index b83678cae5e5196033e2c94a982945a7c6d3b857..059d6eb5be4f1e910679eab6e1a7db727fbcb506 100644
--- a/bridges/primitives/test-utils/src/keyring.rs
+++ b/bridges/primitives/test-utils/src/keyring.rs
@@ -45,7 +45,8 @@ impl Account {
 		let data = self.0.encode();
 		let mut bytes = [0_u8; 32];
 		bytes[0..data.len()].copy_from_slice(&*data);
-		SecretKey::from_bytes(&bytes).expect("A static array of the correct length is a known good.")
+		SecretKey::from_bytes(&bytes)
+			.expect("A static array of the correct length is a known good.")
 	}
 
 	pub fn pair(&self) -> Keypair {
@@ -57,7 +58,8 @@ impl Account {
 		let public = self.public();
 		pair[32..].copy_from_slice(&public.to_bytes());
 
-		Keypair::from_bytes(&pair).expect("We expect the SecretKey to be good, so this must also be good.")
+		Keypair::from_bytes(&pair)
+			.expect("We expect the SecretKey to be good, so this must also be good.")
 	}
 
 	pub fn sign(&self, msg: &[u8]) -> Signature {
@@ -79,10 +81,7 @@ pub fn voter_set() -> VoterSet<AuthorityId> {
 
 /// Convenience function to get a list of Grandpa authorities.
 pub fn authority_list() -> AuthorityList {
-	test_keyring()
-		.iter()
-		.map(|(id, w)| (AuthorityId::from(*id), *w))
-		.collect()
+	test_keyring().iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect()
 }
 
 /// Get the corresponding identities from the keyring for the "standard" authority set.
diff --git a/bridges/primitives/test-utils/src/lib.rs b/bridges/primitives/test-utils/src/lib.rs
index 83aac614ab8096f7c1cbeffe3f920cb803bf2357..9e044ed472dd743ae93d459bd9f2b713aae8931b 100644
--- a/bridges/primitives/test-utils/src/lib.rs
+++ b/bridges/primitives/test-utils/src/lib.rs
@@ -21,8 +21,7 @@
 use bp_header_chain::justification::GrandpaJustification;
 use codec::Encode;
 use sp_application_crypto::TryFrom;
-use sp_finality_grandpa::{AuthorityId, AuthorityWeight};
-use sp_finality_grandpa::{AuthoritySignature, SetId};
+use sp_finality_grandpa::{AuthorityId, AuthoritySignature, AuthorityWeight, SetId};
 use sp_runtime::traits::{Header as HeaderT, One, Zero};
 use sp_std::prelude::*;
 
@@ -72,10 +71,7 @@ impl<H: HeaderT> Default for JustificationGeneratorParams<H> {
 
 /// Make a valid GRANDPA justification with sensible defaults
 pub fn make_default_justification<H: HeaderT>(header: &H) -> GrandpaJustification<H> {
-	let params = JustificationGeneratorParams::<H> {
-		header: header.clone(),
-		..Default::default()
-	};
+	let params = JustificationGeneratorParams::<H> { header: header.clone(), ..Default::default() };
 
 	make_justification_for_header(params)
 }
@@ -89,15 +85,11 @@ pub fn make_default_justification<H: HeaderT>(header: &H) -> GrandpaJustificatio
 ///
 /// Note: This needs at least three authorities or else the verifier will complain about
 /// being given an invalid commit.
-pub fn make_justification_for_header<H: HeaderT>(params: JustificationGeneratorParams<H>) -> GrandpaJustification<H> {
-	let JustificationGeneratorParams {
-		header,
-		round,
-		set_id,
-		authorities,
-		mut ancestors,
-		forks,
-	} = params;
+pub fn make_justification_for_header<H: HeaderT>(
+	params: JustificationGeneratorParams<H>,
+) -> GrandpaJustification<H> {
+	let JustificationGeneratorParams { header, round, set_id, authorities, mut ancestors, forks } =
+		params;
 	let (target_hash, target_number) = (header.hash(), *header.number());
 	let mut votes_ancestries = vec![];
 	let mut precommits = vec![];
@@ -144,11 +136,7 @@ pub fn make_justification_for_header<H: HeaderT>(params: JustificationGeneratorP
 
 	GrandpaJustification {
 		round,
-		commit: finality_grandpa::Commit {
-			target_hash,
-			target_number,
-			precommits,
-		},
+		commit: finality_grandpa::Commit { target_hash, target_number, precommits },
 		votes_ancestries,
 	}
 }
@@ -165,10 +153,7 @@ fn generate_chain<H: HeaderT>(fork_id: u32, depth: u32, ancestor: &H) -> Vec<H>
 
 		// Modifying the digest so headers at the same height but in different forks have different
 		// hashes
-		header
-			.digest_mut()
-			.logs
-			.push(sp_runtime::DigestItem::Other(fork_id.encode()));
+		header.digest_mut().logs.push(sp_runtime::DigestItem::Other(fork_id.encode()));
 
 		headers.push(header);
 	}
@@ -183,29 +168,26 @@ pub fn signed_precommit<H: HeaderT>(
 	round: u64,
 	set_id: SetId,
 ) -> finality_grandpa::SignedPrecommit<H::Hash, H::Number, AuthoritySignature, AuthorityId> {
-	let precommit = finality_grandpa::Precommit {
-		target_hash: target.0,
-		target_number: target.1,
-	};
+	let precommit = finality_grandpa::Precommit { target_hash: target.0, target_number: target.1 };
 
-	let encoded =
-		sp_finality_grandpa::localized_payload(round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()));
+	let encoded = sp_finality_grandpa::localized_payload(
+		round,
+		set_id,
+		&finality_grandpa::Message::Precommit(precommit.clone()),
+	);
 
 	let signature = signer.sign(&encoded);
 	let raw_signature: Vec<u8> = signature.to_bytes().into();
 
-	// Need to wrap our signature and id types that they match what our `SignedPrecommit` is expecting
+	// Need to wrap our signature and id types that they match what our `SignedPrecommit` is
+	// expecting
 	let signature = AuthoritySignature::try_from(raw_signature).expect(
 		"We know our Keypair is good,
 		so our signature must also be good.",
 	);
 	let id = (*signer).into();
 
-	finality_grandpa::SignedPrecommit {
-		precommit,
-		signature,
-		id,
-	}
+	finality_grandpa::SignedPrecommit { precommit, signature, id }
 }
 
 /// Get a header for testing.
@@ -213,13 +195,7 @@ pub fn signed_precommit<H: HeaderT>(
 /// The correct parent hash will be used if given a non-zero header.
 pub fn test_header<H: HeaderT>(number: H::Number) -> H {
 	let default = |num| {
-		H::new(
-			num,
-			Default::default(),
-			Default::default(),
-			Default::default(),
-			Default::default(),
-		)
+		H::new(num, Default::default(), Default::default(), Default::default(), Default::default())
 	};
 
 	let mut header = default(number);
diff --git a/bridges/primitives/token-swap/src/lib.rs b/bridges/primitives/token-swap/src/lib.rs
index 13d6a596cbf3f25fb0bce3515f830bcd14e2bbaa..336e3263f09d9c9674a9490b4b8c03f08d03f92b 100644
--- a/bridges/primitives/token-swap/src/lib.rs
+++ b/bridges/primitives/token-swap/src/lib.rs
@@ -26,8 +26,8 @@ pub enum TokenSwapState {
 	/// The swap has been started using the `start_claim` call, but we have no proof that it has
 	/// happened at the Bridged chain.
 	Started,
-	/// The swap has happened at the Bridged chain and may be claimed by the Bridged chain party using
-	/// the `claim_swap` call.
+	/// The swap has happened at the Bridged chain and may be claimed by the Bridged chain party
+	/// using the `claim_swap` call.
 	Confirmed,
 	/// The swap has failed at the Bridged chain and This chain party may cancel it using the
 	/// `cancel_swap` call.
@@ -43,19 +43,20 @@ pub enum TokenSwapType<ThisBlockNumber> {
 	/// The `target_account_at_bridged_chain` is temporary and only have funds for single swap.
 	///
 	/// ***WARNING**: if `target_account_at_bridged_chain` still exists after the swap has been
-	/// completed (either by claiming or canceling), the `source_account_at_this_chain` will be able
-	/// to restart the swap again and repeat the swap until `target_account_at_bridged_chain` depletes.
+	/// completed (either by claiming or canceling), the `source_account_at_this_chain` will be
+	/// able to restart the swap again and repeat the swap until `target_account_at_bridged_chain`
+	/// depletes.
 	TemporaryTargetAccountAtBridgedChain,
-	/// This swap type prevents `source_account_at_this_chain` from restarting the swap after it has
-	/// been completed. There are two consequences:
+	/// This swap type prevents `source_account_at_this_chain` from restarting the swap after it
+	/// has been completed. There are two consequences:
 	///
-	/// 1) the `source_account_at_this_chain` won't be able to call `start_swap` after given <ThisBlockNumber>;
-	/// 2) the `target_account_at_bridged_chain` won't be able to call `claim_swap` (over the bridge) before
-	///    block `<ThisBlockNumber + 1>`.
+	/// 1) the `source_account_at_this_chain` won't be able to call `start_swap` after given
+	/// <ThisBlockNumber>; 2) the `target_account_at_bridged_chain` won't be able to call
+	/// `claim_swap` (over the bridge) before    block `<ThisBlockNumber + 1>`.
 	///
 	/// The second element is the nonce of the swap. You must care about its uniqueness if you're
-	/// planning to perform another swap with exactly the same parameters (i.e. same amount, same accounts,
-	/// same `ThisBlockNumber`) to avoid collisions.
+	/// planning to perform another swap with exactly the same parameters (i.e. same amount, same
+	/// accounts, same `ThisBlockNumber`) to avoid collisions.
 	LockClaimUntilBlock(ThisBlockNumber, U256),
 }
 
@@ -64,9 +65,11 @@ pub enum TokenSwapType<ThisBlockNumber> {
 ///
 /// **IMPORTANT NOTE**: this structure is always the same during single token swap. So even
 /// when chain changes, the meaning of This and Bridged are still used to point to the same chains.
-/// This chain is always the chain where swap has been started. And the Bridged chain is the other chain.
+/// This chain is always the chain where swap has been started. And the Bridged chain is the other
+/// chain.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
-pub struct TokenSwap<ThisBlockNumber, ThisBalance, ThisAccountId, BridgedBalance, BridgedAccountId> {
+pub struct TokenSwap<ThisBlockNumber, ThisBalance, ThisAccountId, BridgedBalance, BridgedAccountId>
+{
 	/// The type of the swap.
 	pub swap_type: TokenSwapType<ThisBlockNumber>,
 	/// This chain balance to be swapped with `target_balance_at_bridged_chain`.
@@ -75,6 +78,7 @@ pub struct TokenSwap<ThisBlockNumber, ThisBalance, ThisAccountId, BridgedBalance
 	pub source_account_at_this_chain: ThisAccountId,
 	/// Bridged chain balance to be swapped with `source_balance_at_this_chain`.
 	pub target_balance_at_bridged_chain: BridgedBalance,
-	/// Account id of the party acting at the Bridged chain and owning the `target_balance_at_bridged_chain`.
+	/// Account id of the party acting at the Bridged chain and owning the
+	/// `target_balance_at_bridged_chain`.
 	pub target_account_at_bridged_chain: BridgedAccountId,
 }
diff --git a/bridges/relays/bin-ethereum/src/ethereum_client.rs b/bridges/relays/bin-ethereum/src/ethereum_client.rs
index 007bef49fea07908d8c3af3b9a09e2386340376a..75ed57fea16348d793d444352a7bb134cb30f238 100644
--- a/bridges/relays/bin-ethereum/src/ethereum_client.rs
+++ b/bridges/relays/bin-ethereum/src/ethereum_client.rs
@@ -14,8 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::rpc_errors::RpcError;
-use crate::substrate_sync_loop::QueuedRialtoHeader;
+use crate::{rpc_errors::RpcError, substrate_sync_loop::QueuedRialtoHeader};
 
 use async_trait::async_trait;
 use bp_eth_poa::signatures::secret_to_address;
@@ -60,7 +59,10 @@ pub trait EthereumHighLevelRpc {
 	) -> SubmittedHeaders<RialtoHeaderId, RpcError>;
 
 	/// Returns ids of incomplete Substrate headers.
-	async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult<HashSet<RialtoHeaderId>>;
+	async fn incomplete_substrate_headers(
+		&self,
+		contract_address: Address,
+	) -> RpcResult<HashSet<RialtoHeaderId>>;
 
 	/// Complete Substrate header.
 	async fn complete_substrate_header(
@@ -104,7 +106,7 @@ impl EthereumHighLevelRpc for EthereumClient {
 		let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?;
 
 		if number != number.low_u32().into() {
-			return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber));
+			return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber))
 		}
 
 		Ok(HeaderId(number.low_u32(), hash))
@@ -138,31 +140,28 @@ impl EthereumHighLevelRpc for EthereumClient {
 		let address: Address = secret_to_address(&params.signer);
 		let nonce = match self.account_nonce(address).await {
 			Ok(nonce) => nonce,
-			Err(error) => {
+			Err(error) =>
 				return SubmittedHeaders {
 					submitted: Vec::new(),
 					incomplete: Vec::new(),
 					rejected: headers.iter().rev().map(|header| header.id()).collect(),
 					fatal_error: Some(error.into()),
-				}
-			}
+				},
 		};
 
 		// submit headers. Note that we're cloning self here. It is ok, because
 		// cloning `jsonrpsee::Client` only clones reference to background threads
 		submit_substrate_headers(
-			EthereumHeadersSubmitter {
-				client: self.clone(),
-				params,
-				contract_address,
-				nonce,
-			},
+			EthereumHeadersSubmitter { client: self.clone(), params, contract_address, nonce },
 			headers,
 		)
 		.await
 	}
 
-	async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult<HashSet<RialtoHeaderId>> {
+	async fn incomplete_substrate_headers(
+		&self,
+		contract_address: Address,
+	) -> RpcResult<HashSet<RialtoHeaderId>> {
 		let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call();
 		let call_request = CallRequest {
 			to: Some(contract_address),
@@ -173,13 +172,14 @@ impl EthereumHighLevelRpc for EthereumClient {
 		let call_result = self.eth_call(call_request).await?;
 
 		// Q: Is is correct to call these "incomplete_ids"?
-		let (incomplete_headers_numbers, incomplete_headers_hashes) = call_decoder.decode(&call_result.0)?;
+		let (incomplete_headers_numbers, incomplete_headers_hashes) =
+			call_decoder.decode(&call_result.0)?;
 		let incomplete_ids = incomplete_headers_numbers
 			.into_iter()
 			.zip(incomplete_headers_hashes)
 			.filter_map(|(number, hash)| {
 				if number != number.low_u32().into() {
-					return None;
+					return None
 				}
 
 				Some(HeaderId(number.low_u32(), hash))
@@ -202,7 +202,11 @@ impl EthereumHighLevelRpc for EthereumClient {
 				Some(contract_address),
 				None,
 				false,
-				bridge_contract::functions::import_finality_proof::encode_input(id.0, id.1, justification),
+				bridge_contract::functions::import_finality_proof::encode_input(
+					id.0,
+					id.1,
+					justification,
+				),
 			)
 			.await?;
 
@@ -263,7 +267,7 @@ impl HeadersBatch {
 	) -> Result<(Self, Vec<RialtoHeaderId>), ()> {
 		if headers.len() != ids.len() {
 			log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len());
-			return Err(());
+			return Err(())
 		}
 
 		let header1 = headers.pop().ok_or(())?;
@@ -276,27 +280,14 @@ impl HeadersBatch {
 			submitting_ids.extend(ids.pop().iter());
 		}
 
-		Ok((
-			Self {
-				header1,
-				header2,
-				header3,
-				header4,
-			},
-			submitting_ids,
-		))
+		Ok((Self { header1, header2, header3, header4 }, submitting_ids))
 	}
 
 	/// Returns unified array of headers.
 	///
 	/// The first element is always `Some`.
 	fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] {
-		[
-			Some(&self.header1),
-			self.header2.as_ref(),
-			self.header3.as_ref(),
-			self.header4.as_ref(),
-		]
+		[Some(&self.header1), self.header2.as_ref(), self.header3.as_ref(), self.header4.as_ref()]
 	}
 
 	/// Encodes all headers. If header is not present an empty vector will be returned.
@@ -323,9 +314,10 @@ impl HeadersBatch {
 	/// or when `idx > HEADERS_BATCH`.
 	pub fn split_off(&mut self, idx: usize) -> Result<(), ()> {
 		if idx == 0 || idx > HEADERS_BATCH {
-			return Err(());
+			return Err(())
 		}
-		let mut vals: [_; HEADERS_BATCH] = [&mut None, &mut self.header2, &mut self.header3, &mut self.header4];
+		let mut vals: [_; HEADERS_BATCH] =
+			[&mut None, &mut self.header2, &mut self.header3, &mut self.header4];
 		for val in vals.iter_mut().skip(idx) {
 			**val = None;
 		}
@@ -359,7 +351,8 @@ struct EthereumHeadersSubmitter {
 impl HeadersSubmitter for EthereumHeadersSubmitter {
 	async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult<usize> {
 		let [h1, h2, h3, h4] = headers.encode();
-		let (encoded_call, call_decoder) = bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4);
+		let (encoded_call, call_decoder) =
+			bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4);
 		let call_request = CallRequest {
 			to: Some(self.contract_address),
 			data: Some(encoded_call.into()),
@@ -369,7 +362,7 @@ impl HeadersSubmitter for EthereumHeadersSubmitter {
 		let call_result = self.client.eth_call(call_request).await?;
 		let incomplete_index: U256 = call_decoder.decode(&call_result.0)?;
 		if incomplete_index > HEADERS_BATCH.into() {
-			return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex));
+			return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex))
 		}
 
 		Ok(incomplete_index.low_u32() as _)
@@ -407,17 +400,21 @@ async fn submit_substrate_headers(
 	headers.reverse();
 
 	while !headers.is_empty() {
-		let (headers, submitting_ids) =
-			HeadersBatch::pop_from(&mut headers, &mut ids).expect("Headers and ids are not empty; qed");
+		let (headers, submitting_ids) = HeadersBatch::pop_from(&mut headers, &mut ids)
+			.expect("Headers and ids are not empty; qed");
 
-		submitted_headers.fatal_error =
-			submit_substrate_headers_batch(&mut header_submitter, &mut submitted_headers, submitting_ids, headers)
-				.await;
+		submitted_headers.fatal_error = submit_substrate_headers_batch(
+			&mut header_submitter,
+			&mut submitted_headers,
+			submitting_ids,
+			headers,
+		)
+		.await;
 
 		if submitted_headers.fatal_error.is_some() {
 			ids.reverse();
 			submitted_headers.rejected.extend(ids);
-			break;
+			break
 		}
 	}
 
@@ -436,9 +433,11 @@ async fn submit_substrate_headers_batch(
 	// if parent of first header is either incomplete, or rejected, we assume that contract
 	// will reject this header as well
 	let parent_id = headers.header1.parent_id();
-	if submitted_headers.rejected.contains(&parent_id) || submitted_headers.incomplete.contains(&parent_id) {
+	if submitted_headers.rejected.contains(&parent_id) ||
+		submitted_headers.incomplete.contains(&parent_id)
+	{
 		submitted_headers.rejected.extend(ids);
-		return None;
+		return None
 	}
 
 	// check if headers are incomplete
@@ -450,11 +449,11 @@ async fn submit_substrate_headers_batch(
 			// contract has rejected all headers => we do not want to submit it
 			submitted_headers.rejected.extend(ids);
 			if error.is_connection_error() {
-				return Some(error);
+				return Some(error)
 			} else {
-				return None;
+				return None
 			}
-		}
+		},
 	};
 
 	// Modify `ids` and `headers` to only contain values that are going to be accepted.
@@ -477,12 +476,12 @@ async fn submit_substrate_headers_batch(
 			submitted_headers.submitted.extend(submitted);
 			submitted_headers.rejected.extend(rejected);
 			None
-		}
+		},
 		Err(error) => {
 			submitted_headers.rejected.extend(submitted);
 			submitted_headers.rejected.extend(rejected);
 			Some(error)
-		}
+		},
 	}
 }
 
@@ -521,11 +520,7 @@ mod tests {
 				number,
 				Default::default(),
 				Default::default(),
-				if number == 0 {
-					Default::default()
-				} else {
-					header(number - 1).id().1
-				},
+				if number == 0 { Default::default() } else { header(number - 1).id().1 },
 				Default::default(),
 			)
 			.into(),
@@ -535,10 +530,7 @@ mod tests {
 	#[test]
 	fn descendants_of_incomplete_headers_are_not_submitted() {
 		let submitted_headers = async_std::task::block_on(submit_substrate_headers(
-			TestHeadersSubmitter {
-				incomplete: vec![header(5).id()],
-				failed: vec![],
-			},
+			TestHeadersSubmitter { incomplete: vec![header(5).id()], failed: vec![] },
 			vec![header(5), header(6)],
 		));
 		assert_eq!(submitted_headers.submitted, vec![header(5).id()]);
@@ -550,19 +542,8 @@ mod tests {
 	#[test]
 	fn headers_after_fatal_error_are_not_submitted() {
 		let submitted_headers = async_std::task::block_on(submit_substrate_headers(
-			TestHeadersSubmitter {
-				incomplete: vec![],
-				failed: vec![header(9).id()],
-			},
-			vec![
-				header(5),
-				header(6),
-				header(7),
-				header(8),
-				header(9),
-				header(10),
-				header(11),
-			],
+			TestHeadersSubmitter { incomplete: vec![], failed: vec![header(9).id()] },
+			vec![header(5), header(6), header(7), header(8), header(9), header(10), header(11)],
 		));
 		assert_eq!(
 			submitted_headers.submitted,
@@ -583,10 +564,7 @@ mod tests {
 		let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap();
 		assert_eq!(init_headers, vec![header(5)]);
 		assert_eq!(init_ids, vec![header(5).id()]);
-		assert_eq!(
-			ids,
-			vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()]
-		);
+		assert_eq!(ids, vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()]);
 		headers
 	}
 
diff --git a/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
index 3f9076f6db2298e0bcbbd84be8e2a783cfc64b32..d7006a9c673cfaed7a21f4ed4841af8092f27a04 100644
--- a/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
+++ b/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
@@ -14,17 +14,21 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::ethereum_client::{bridge_contract, EthereumHighLevelRpc};
-use crate::rpc_errors::RpcError;
+use crate::{
+	ethereum_client::{bridge_contract, EthereumHighLevelRpc},
+	rpc_errors::RpcError,
+};
 
 use codec::{Decode, Encode};
 use num_traits::Zero;
 use relay_ethereum_client::{
-	Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams,
+	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
+	SigningParams as EthereumSigningParams,
 };
 use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto};
 use relay_substrate_client::{
-	Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, OpaqueGrandpaAuthoritiesSet,
+	Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
+	OpaqueGrandpaAuthoritiesSet,
 };
 use relay_utils::HeaderId;
 
@@ -102,19 +106,18 @@ async fn prepare_initial_header(
 	sub_initial_header: Option<Vec<u8>>,
 ) -> Result<(RialtoHeaderId, Vec<u8>), String> {
 	match sub_initial_header {
-		Some(raw_initial_header) => match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) {
-			Ok(initial_header) => Ok((
-				HeaderId(initial_header.number, initial_header.hash()),
-				raw_initial_header,
-			)),
-			Err(error) => Err(format!("Error decoding initial header: {}", error)),
-		},
+		Some(raw_initial_header) =>
+			match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) {
+				Ok(initial_header) =>
+					Ok((HeaderId(initial_header.number, initial_header.hash()), raw_initial_header)),
+				Err(error) => Err(format!("Error decoding initial header: {}", error)),
+			},
 		None => {
 			let initial_header = sub_client.header_by_number(Zero::zero()).await;
 			initial_header
 				.map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode()))
 				.map_err(|error| format!("Error reading Substrate genesis header: {:?}", error))
-		}
+		},
 	}
 }
 
@@ -129,7 +132,8 @@ async fn prepare_initial_authorities_set(
 		None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await,
 	};
 
-	initial_authorities_set.map_err(|error| format!("Error reading GRANDPA authorities set: {:?}", error))
+	initial_authorities_set
+		.map_err(|error| format!("Error reading GRANDPA authorities set: {:?}", error))
 }
 
 /// Deploy bridge contract to Ethereum chain.
@@ -147,7 +151,12 @@ async fn deploy_bridge_contract(
 			None,
 			None,
 			false,
-			bridge_contract::constructor(contract_code, initial_header, initial_set_id, initial_authorities),
+			bridge_contract::constructor(
+				contract_code,
+				initial_header,
+				initial_set_id,
+				initial_authorities,
+			),
 		)
 		.await
 		.map_err(|error| format!("Error deploying contract: {:?}", error))
diff --git a/bridges/relays/bin-ethereum/src/ethereum_exchange.rs b/bridges/relays/bin-ethereum/src/ethereum_exchange.rs
index 73a107cb40655023ae0f187ea0fca2feebc3b746..6262c44c18d2c79fba10a5616e358740aa587a7d 100644
--- a/bridges/relays/bin-ethereum/src/ethereum_exchange.rs
+++ b/bridges/relays/bin-ethereum/src/ethereum_exchange.rs
@@ -16,28 +16,34 @@
 
 //! Relaying proofs of PoA -> Substrate exchange transactions.
 
-use crate::instances::BridgeInstance;
-use crate::rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc};
-use crate::rpc_errors::RpcError;
-use crate::substrate_types::into_substrate_ethereum_receipt;
+use crate::{
+	instances::BridgeInstance,
+	rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc},
+	rpc_errors::RpcError,
+	substrate_types::into_substrate_ethereum_receipt,
+};
 
 use async_trait::async_trait;
 use bp_currency_exchange::MaybeLockFundsTransaction;
-use exchange_relay::exchange::{
-	relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient,
-	TransactionProofPipeline,
+use exchange_relay::{
+	exchange::{
+		relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient,
+		TransactionProofPipeline,
+	},
+	exchange_loop::{run as run_loop, InMemoryStorage},
 };
-use exchange_relay::exchange_loop::{run as run_loop, InMemoryStorage};
 use relay_ethereum_client::{
 	types::{
 		HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions,
-		Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, HEADER_ID_PROOF,
+		Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256,
+		HEADER_ID_PROOF,
 	},
 	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
 };
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
 use relay_substrate_client::{
-	Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
+	Chain as SubstrateChain, Client as SubstrateClient,
+	ConnectionParams as SubstrateConnectionParams,
 };
 use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId};
 use rialto_runtime::exchange::EthereumTransactionInclusionProof;
@@ -111,12 +117,7 @@ impl SourceBlock for EthereumSourceBlock {
 	}
 
 	fn transactions(&self) -> Vec<Self::Transaction> {
-		self.0
-			.transactions
-			.iter()
-			.cloned()
-			.map(EthereumSourceTransaction)
-			.collect()
+		self.0.transactions.iter().cloned().map(EthereumSourceTransaction).collect()
 	}
 }
 
@@ -178,13 +179,12 @@ impl SourceClient<EthereumToSubstrateExchange> for EthereumTransactionsSource {
 		};
 
 		// we need transaction to be mined => check if it is included in the block
-		let (eth_header_id, eth_tx_index) = match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) {
-			(Some(block_number), Some(block_hash), Some(transaction_index)) => (
-				HeaderId(block_number.as_u64(), block_hash),
-				transaction_index.as_u64() as _,
-			),
-			_ => return Ok(None),
-		};
+		let (eth_header_id, eth_tx_index) =
+			match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) {
+				(Some(block_number), Some(block_hash), Some(transaction_index)) =>
+					(HeaderId(block_number.as_u64(), block_hash), transaction_index.as_u64() as _),
+				_ => return Ok(None),
+			};
 
 		Ok(Some((eth_header_id, eth_tx_index)))
 	}
@@ -194,9 +194,11 @@ impl SourceClient<EthereumToSubstrateExchange> for EthereumTransactionsSource {
 		block: &EthereumSourceBlock,
 		tx_index: usize,
 	) -> Result<EthereumTransactionInclusionProof, RpcError> {
-		const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\
+		const TRANSACTION_HAS_RAW_FIELD_PROOF: &str =
+			"RPC level checks that transactions from Ethereum\
 			node are having `raw` field; qed";
-		const BLOCK_HAS_HASH_FIELD_PROOF: &str = "RPC level checks that block has `hash` field; qed";
+		const BLOCK_HAS_HASH_FIELD_PROOF: &str =
+			"RPC level checks that block has `hash` field; qed";
 
 		let mut transaction_proof = Vec::with_capacity(block.0.transactions.len());
 		for tx in &block.0.transactions {
@@ -266,12 +268,15 @@ impl TargetClient<EthereumToSubstrateExchange> for SubstrateTransactionsTarget {
 		self.client.best_ethereum_finalized_block().await
 	}
 
-	async fn filter_transaction_proof(&self, proof: &EthereumTransactionInclusionProof) -> Result<bool, RpcError> {
+	async fn filter_transaction_proof(
+		&self,
+		proof: &EthereumTransactionInclusionProof,
+	) -> Result<bool, RpcError> {
 		// let's try to parse transaction locally
 		let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize];
 		let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx);
 		if parse_result.is_err() {
-			return Ok(false);
+			return Ok(false)
 		}
 
 		// now let's check if transaction is successful
@@ -285,8 +290,12 @@ impl TargetClient<EthereumToSubstrateExchange> for SubstrateTransactionsTarget {
 		self.client.verify_exchange_transaction_proof(proof.clone()).await
 	}
 
-	async fn submit_transaction_proof(&self, proof: EthereumTransactionInclusionProof) -> Result<(), RpcError> {
-		let (sign_params, bridge_instance) = (self.sign_params.clone(), self.bridge_instance.clone());
+	async fn submit_transaction_proof(
+		&self,
+		proof: EthereumTransactionInclusionProof,
+	) -> Result<(), RpcError> {
+		let (sign_params, bridge_instance) =
+			(self.sign_params.clone(), self.bridge_instance.clone());
 		self.client
 			.submit_exchange_transaction_proof(sign_params, bridge_instance, proof)
 			.await
@@ -311,9 +320,10 @@ pub async fn run(params: EthereumExchangeParams) {
 					err,
 				),
 			}
-		}
+		},
 		ExchangeRelayMode::Auto(eth_start_with_block_number) => {
-			let result = run_auto_transactions_relay_loop(params, eth_start_with_block_number).await;
+			let result =
+				run_auto_transactions_relay_loop(params, eth_start_with_block_number).await;
 			if let Err(err) = result {
 				log::error!(
 					target: "bridge",
@@ -321,23 +331,18 @@ pub async fn run(params: EthereumExchangeParams) {
 					err,
 				);
 			}
-		}
+		},
 	}
 }
 
 /// Run single transaction proof relay and stop.
-async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) -> Result<(), String> {
-	let EthereumExchangeParams {
-		eth_params,
-		sub_params,
-		sub_sign,
-		instance,
-		..
-	} = params;
+async fn run_single_transaction_relay(
+	params: EthereumExchangeParams,
+	eth_tx_hash: H256,
+) -> Result<(), String> {
+	let EthereumExchangeParams { eth_params, sub_params, sub_sign, instance, .. } = params;
 
-	let eth_client = EthereumClient::try_connect(eth_params)
-		.await
-		.map_err(RpcError::Ethereum)?;
+	let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?;
 	let sub_client = SubstrateClient::<Rialto>::try_connect(sub_params)
 		.await
 		.map_err(RpcError::Substrate)?;
@@ -357,12 +362,7 @@ async fn run_auto_transactions_relay_loop(
 	eth_start_with_block_number: Option<u64>,
 ) -> anyhow::Result<()> {
 	let EthereumExchangeParams {
-		eth_params,
-		sub_params,
-		sub_sign,
-		metrics_params,
-		instance,
-		..
+		eth_params, sub_params, sub_sign, metrics_params, instance, ..
 	} = params;
 
 	let eth_client = EthereumClient::new(eth_params).await;
@@ -370,7 +370,7 @@ async fn run_auto_transactions_relay_loop(
 
 	let eth_start_with_block_number = match eth_start_with_block_number {
 		Some(eth_start_with_block_number) => eth_start_with_block_number,
-		None => {
+		None =>
 			sub_client
 				.best_ethereum_finalized_block()
 				.await
@@ -380,8 +380,7 @@ async fn run_auto_transactions_relay_loop(
 						err
 					)
 				})?
-				.0
-		}
+				.0,
 	};
 
 	run_loop(
diff --git a/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
index 602d4f14e4f0bacc7ab360b2db288428c4ab758b..75bdf0e577aa209a96bbb74d170c87dfd59bdebf 100644
--- a/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
+++ b/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
@@ -22,7 +22,8 @@ use bp_eth_poa::{
 };
 use relay_ethereum_client::{
 	types::{CallRequest, U256},
-	Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams,
+	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
+	SigningParams as EthereumSigningParams,
 };
 use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS;
 
@@ -43,13 +44,8 @@ pub struct EthereumExchangeSubmitParams {
 
 /// Submit single Ethereum -> Substrate exchange transaction.
 pub async fn run(params: EthereumExchangeSubmitParams) {
-	let EthereumExchangeSubmitParams {
-		eth_params,
-		eth_sign,
-		eth_nonce,
-		eth_amount,
-		sub_recipient,
-	} = params;
+	let EthereumExchangeSubmitParams { eth_params, eth_sign, eth_nonce, eth_amount, sub_recipient } =
+		params;
 
 	let result: Result<_, String> = async move {
 		let eth_client = EthereumClient::try_connect(eth_params)
@@ -83,9 +79,8 @@ pub async fn run(params: EthereumExchangeSubmitParams) {
 			value: eth_amount,
 			payload: sub_recipient_encoded.to_vec(),
 		};
-		let eth_tx_signed = eth_tx_unsigned
-			.clone()
-			.sign_by(&eth_sign.signer, Some(eth_sign.chain_id));
+		let eth_tx_signed =
+			eth_tx_unsigned.clone().sign_by(&eth_sign.signer, Some(eth_sign.chain_id));
 		eth_client
 			.submit_transaction(eth_tx_signed)
 			.await
@@ -102,13 +97,13 @@ pub async fn run(params: EthereumExchangeSubmitParams) {
 				"Exchange transaction has been submitted to Ethereum node: {:?}",
 				eth_tx_unsigned,
 			);
-		}
+		},
 		Err(err) => {
 			log::error!(
 				target: "bridge",
 				"Error submitting exchange transaction to Ethereum node: {}",
 				err,
 			);
-		}
+		},
 	}
 }
diff --git a/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs b/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
index c4a5f5102a7339f1a3b687c1259c5ff6e19da53d..ee5f8a4600ece2376ac2c2d97112ad16cc2ffd1c 100644
--- a/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
+++ b/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
@@ -16,11 +16,13 @@
 
 //! Ethereum PoA -> Rialto-Substrate synchronization.
 
-use crate::ethereum_client::EthereumHighLevelRpc;
-use crate::instances::BridgeInstance;
-use crate::rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc};
-use crate::rpc_errors::RpcError;
-use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts};
+use crate::{
+	ethereum_client::EthereumHighLevelRpc,
+	instances::BridgeInstance,
+	rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc},
+	rpc_errors::RpcError,
+	substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts},
+};
 
 use async_trait::async_trait;
 use codec::Encode;
@@ -35,12 +37,12 @@ use relay_ethereum_client::{
 };
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
 use relay_substrate_client::{
-	Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
+	Chain as SubstrateChain, Client as SubstrateClient,
+	ConnectionParams as SubstrateConnectionParams,
 };
 use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient};
 
-use std::fmt::Debug;
-use std::{collections::HashSet, sync::Arc, time::Duration};
+use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};
 
 pub mod consts {
 	use super::*;
@@ -57,7 +59,8 @@ pub mod consts {
 	pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128;
 	/// Max Ethereum headers count we want to have in 'submitted' state.
 	pub const MAX_SUBMITTED_HEADERS: usize = 128;
-	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned).
+	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten
+	/// (pruned).
 	pub const PRUNE_DEPTH: u32 = 4096;
 }
 
@@ -106,8 +109,8 @@ impl HeadersSyncPipeline for EthereumHeadersSyncPipeline {
 	type Completion = ();
 
 	fn estimate_size(source: &QueuedHeader<Self>) -> usize {
-		into_substrate_ethereum_header(source.header()).encode().len()
-			+ into_substrate_ethereum_receipts(source.extra())
+		into_substrate_ethereum_header(source.header()).encode().len() +
+			into_substrate_ethereum_receipts(source.extra())
 				.map(|extra| extra.encode().len())
 				.unwrap_or(0)
 	}
@@ -148,22 +151,17 @@ impl SourceClient<EthereumHeadersSyncPipeline> for EthereumHeadersSource {
 	}
 
 	async fn header_by_hash(&self, hash: HeaderHash) -> Result<Header, RpcError> {
-		self.client
-			.header_by_hash(hash)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into)
 	}
 
 	async fn header_by_number(&self, number: u64) -> Result<Header, RpcError> {
-		self.client
-			.header_by_number(number)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_number(number).await.map(Into::into).map_err(Into::into)
 	}
 
-	async fn header_completion(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, Option<()>), RpcError> {
+	async fn header_completion(
+		&self,
+		id: EthereumHeaderId,
+	) -> Result<(EthereumHeaderId, Option<()>), RpcError> {
 		Ok((id, None))
 	}
 
@@ -172,9 +170,7 @@ impl SourceClient<EthereumHeadersSyncPipeline> for EthereumHeadersSource {
 		id: EthereumHeaderId,
 		header: QueuedEthereumHeader,
 	) -> Result<(EthereumHeaderId, Vec<Receipt>), RpcError> {
-		self.client
-			.transaction_receipts(id, header.header().transactions.clone())
-			.await
+		self.client.transaction_receipts(id, header.header().transactions.clone()).await
 	}
 }
 
@@ -197,12 +193,7 @@ impl SubstrateHeadersTarget {
 		sign_params: RialtoSigningParams,
 		bridge_instance: Arc<dyn BridgeInstance>,
 	) -> Self {
-		Self {
-			client,
-			sign_transactions,
-			sign_params,
-			bridge_instance,
-		}
+		Self { client, sign_transactions, sign_params, bridge_instance }
 	}
 }
 
@@ -225,16 +216,19 @@ impl TargetClient<EthereumHeadersSyncPipeline> for SubstrateHeadersTarget {
 		self.client.best_ethereum_block().await
 	}
 
-	async fn is_known_header(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, bool), RpcError> {
+	async fn is_known_header(
+		&self,
+		id: EthereumHeaderId,
+	) -> Result<(EthereumHeaderId, bool), RpcError> {
 		Ok((id, self.client.ethereum_header_known(id).await?))
 	}
 
-	async fn submit_headers(&self, headers: Vec<QueuedEthereumHeader>) -> SubmittedHeaders<EthereumHeaderId, RpcError> {
-		let (sign_params, bridge_instance, sign_transactions) = (
-			self.sign_params.clone(),
-			self.bridge_instance.clone(),
-			self.sign_transactions,
-		);
+	async fn submit_headers(
+		&self,
+		headers: Vec<QueuedEthereumHeader>,
+	) -> SubmittedHeaders<EthereumHeaderId, RpcError> {
+		let (sign_params, bridge_instance, sign_transactions) =
+			(self.sign_params.clone(), self.bridge_instance.clone(), self.sign_transactions);
 		self.client
 			.submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions)
 			.await
@@ -245,11 +239,18 @@ impl TargetClient<EthereumHeadersSyncPipeline> for SubstrateHeadersTarget {
 	}
 
 	#[allow(clippy::unit_arg)]
-	async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result<EthereumHeaderId, RpcError> {
+	async fn complete_header(
+		&self,
+		id: EthereumHeaderId,
+		_completion: (),
+	) -> Result<EthereumHeaderId, RpcError> {
 		Ok(id)
 	}
 
-	async fn requires_extra(&self, header: QueuedEthereumHeader) -> Result<(EthereumHeaderId, bool), RpcError> {
+	async fn requires_extra(
+		&self,
+		header: QueuedEthereumHeader,
+	) -> Result<(EthereumHeaderId, bool), RpcError> {
 		// we can minimize number of receipts_check calls by checking header
 		// logs bloom here, but it may give us false positives (when authorities
 		// source is contract, we never need any logs)
diff --git a/bridges/relays/bin-ethereum/src/instances.rs b/bridges/relays/bin-ethereum/src/instances.rs
index 2ade8632a92c03d0ccb208c612174ba99652a0aa..90d736fa251d790810cec97e99fdd0286213a708 100644
--- a/bridges/relays/bin-ethereum/src/instances.rs
+++ b/bridges/relays/bin-ethereum/src/instances.rs
@@ -18,16 +18,18 @@
 //! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we
 //! must somehow decide which of the instances to sync.
 //!
-//! Note that each instance of the bridge pallet is coupled with an instance of the currency exchange
-//! pallet. We must also have a way to create `Call`s for the correct currency exchange instance.
+//! Note that each instance of the bridge pallet is coupled with an instance of the currency
+//! exchange pallet. We must also have a way to create `Call`s for the correct currency exchange
+//! instance.
 //!
 //! This module helps by preparing the correct `Call`s for each of the different pallet instances.
 
-use crate::ethereum_sync_loop::QueuedEthereumHeader;
-use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts};
+use crate::{
+	ethereum_sync_loop::QueuedEthereumHeader,
+	substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts},
+};
 
-use rialto_runtime::exchange::EthereumTransactionInclusionProof as Proof;
-use rialto_runtime::Call;
+use rialto_runtime::{exchange::EthereumTransactionInclusionProof as Proof, Call};
 
 /// Interface for `Calls` which are needed to correctly sync the bridge.
 ///
@@ -73,7 +75,8 @@ impl BridgeInstance for RialtoPoA {
 	}
 
 	fn build_currency_exchange_call(&self, proof: Proof) -> Call {
-		let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof);
+		let pallet_call =
+			rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof);
 		rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call)
 	}
 }
@@ -109,7 +112,8 @@ impl BridgeInstance for Kovan {
 	}
 
 	fn build_currency_exchange_call(&self, proof: Proof) -> Call {
-		let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof);
+		let pallet_call =
+			rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof);
 		rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call)
 	}
 }
diff --git a/bridges/relays/bin-ethereum/src/main.rs b/bridges/relays/bin-ethereum/src/main.rs
index bcdae353d3dc4d4aab3af720a59bbea6746704dc..cb64dc40d935e4bf07f1177c8df55cff7a2be343 100644
--- a/bridges/relays/bin-ethereum/src/main.rs
+++ b/bridges/relays/bin-ethereum/src/main.rs
@@ -43,7 +43,9 @@ use sp_core::crypto::Pair;
 use substrate_sync_loop::SubstrateSyncParams;
 
 use headers_relay::sync::HeadersSyncParams;
-use relay_ethereum_client::{ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams};
+use relay_ethereum_client::{
+	ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams,
+};
 use relay_rialto_client::SigningParams as RialtoSigningParams;
 use relay_substrate_client::ConnectionParams as SubstrateConnectionParams;
 use std::sync::Arc;
@@ -64,79 +66,83 @@ async fn run_command(matches: &clap::ArgMatches<'_>) {
 				Ok(ethereum_sync_params) => ethereum_sync_params,
 				Err(err) => {
 					log::error!(target: "bridge", "Error parsing parameters: {}", err);
-					return;
-				}
+					return
+				},
 			})
 			.await
 			.is_err()
 			{
 				log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync.");
 			};
-		}
+		},
 		("sub-to-eth", Some(sub_to_eth_matches)) => {
 			log::info!(target: "bridge", "Starting SUB âž¡ ETH relay.");
 			if substrate_sync_loop::run(match substrate_sync_params(sub_to_eth_matches) {
 				Ok(substrate_sync_params) => substrate_sync_params,
 				Err(err) => {
 					log::error!(target: "bridge", "Error parsing parameters: {}", err);
-					return;
-				}
+					return
+				},
 			})
 			.await
 			.is_err()
 			{
 				log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync.");
 			};
-		}
+		},
 		("eth-deploy-contract", Some(eth_deploy_matches)) => {
 			log::info!(target: "bridge", "Deploying ETH contracts.");
-			ethereum_deploy_contract::run(match ethereum_deploy_contract_params(eth_deploy_matches) {
-				Ok(ethereum_deploy_params) => ethereum_deploy_params,
-				Err(err) => {
-					log::error!(target: "bridge", "Error during contract deployment: {}", err);
-					return;
-				}
-			})
+			ethereum_deploy_contract::run(
+				match ethereum_deploy_contract_params(eth_deploy_matches) {
+					Ok(ethereum_deploy_params) => ethereum_deploy_params,
+					Err(err) => {
+						log::error!(target: "bridge", "Error during contract deployment: {}", err);
+						return
+					},
+				},
+			)
 			.await;
-		}
+		},
 		("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => {
 			log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction.");
-			ethereum_exchange_submit::run(match ethereum_exchange_submit_params(eth_exchange_submit_matches) {
-				Ok(eth_exchange_submit_params) => eth_exchange_submit_params,
-				Err(err) => {
-					log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err);
-					return;
-				}
-			})
+			ethereum_exchange_submit::run(
+				match ethereum_exchange_submit_params(eth_exchange_submit_matches) {
+					Ok(eth_exchange_submit_params) => eth_exchange_submit_params,
+					Err(err) => {
+						log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err);
+						return
+					},
+				},
+			)
 			.await;
-		}
+		},
 		("eth-exchange-sub", Some(eth_exchange_matches)) => {
 			log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay.");
 			ethereum_exchange::run(match ethereum_exchange_params(eth_exchange_matches) {
 				Ok(eth_exchange_params) => eth_exchange_params,
 				Err(err) => {
 					log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err);
-					return;
-				}
+					return
+				},
 			})
 			.await;
-		}
+		},
 		("", _) => {
 			log::error!(target: "bridge", "No subcommand specified");
-		}
+		},
 		_ => unreachable!("all possible subcommands are checked above; qed"),
 	}
 }
 
-fn ethereum_connection_params(matches: &clap::ArgMatches) -> Result<EthereumConnectionParams, String> {
+fn ethereum_connection_params(
+	matches: &clap::ArgMatches,
+) -> Result<EthereumConnectionParams, String> {
 	let mut params = EthereumConnectionParams::default();
 	if let Some(eth_host) = matches.value_of("eth-host") {
 		params.host = eth_host.into();
 	}
 	if let Some(eth_port) = matches.value_of("eth-port") {
-		params.port = eth_port
-			.parse()
-			.map_err(|e| format!("Failed to parse eth-port: {}", e))?;
+		params.port = eth_port.parse().map_err(|e| format!("Failed to parse eth-port: {}", e))?;
 	}
 	Ok(params)
 }
@@ -144,9 +150,10 @@ fn ethereum_connection_params(matches: &clap::ArgMatches) -> Result<EthereumConn
 fn ethereum_signing_params(matches: &clap::ArgMatches) -> Result<EthereumSigningParams, String> {
 	let mut params = EthereumSigningParams::default();
 	if let Some(eth_signer) = matches.value_of("eth-signer") {
-		params.signer =
-			SecretKey::parse_slice(&hex::decode(eth_signer).map_err(|e| format!("Failed to parse eth-signer: {}", e))?)
-				.map_err(|e| format!("Invalid eth-signer: {}", e))?;
+		params.signer = SecretKey::parse_slice(
+			&hex::decode(eth_signer).map_err(|e| format!("Failed to parse eth-signer: {}", e))?,
+		)
+		.map_err(|e| format!("Invalid eth-signer: {}", e))?;
 	}
 	if let Some(eth_chain_id) = matches.value_of("eth-chain-id") {
 		params.chain_id = eth_chain_id
@@ -156,15 +163,15 @@ fn ethereum_signing_params(matches: &clap::ArgMatches) -> Result<EthereumSigning
 	Ok(params)
 }
 
-fn substrate_connection_params(matches: &clap::ArgMatches) -> Result<SubstrateConnectionParams, String> {
+fn substrate_connection_params(
+	matches: &clap::ArgMatches,
+) -> Result<SubstrateConnectionParams, String> {
 	let mut params = SubstrateConnectionParams::default();
 	if let Some(sub_host) = matches.value_of("sub-host") {
 		params.host = sub_host.into();
 	}
 	if let Some(sub_port) = matches.value_of("sub-port") {
-		params.port = sub_port
-			.parse()
-			.map_err(|e| format!("Failed to parse sub-port: {}", e))?;
+		params.port = sub_port.parse().map_err(|e| format!("Failed to parse sub-port: {}", e))?;
 	}
 	Ok(params)
 }
@@ -199,7 +206,7 @@ fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result<EthereumSyncParams
 
 			// tx pool won't accept too much unsigned transactions
 			sync_params.max_headers_in_submitted_status = 10;
-		}
+		},
 		Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup,
 		Some(mode) => return Err(format!("Invalid sub-tx-mode: {}", mode)),
 		None => sync_params.target_tx_mode = TargetTransactionMode::Signed,
@@ -252,10 +259,14 @@ fn substrate_sync_params(matches: &clap::ArgMatches) -> Result<SubstrateSyncPara
 	Ok(params)
 }
 
-fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result<EthereumDeployContractParams, String> {
-	let eth_contract_code = parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| {
-		hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")).expect("code is hardcoded, thus valid; qed")
-	});
+fn ethereum_deploy_contract_params(
+	matches: &clap::ArgMatches,
+) -> Result<EthereumDeployContractParams, String> {
+	let eth_contract_code =
+		parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| {
+			hex::decode(include_str!("../res/substrate-bridge-bytecode.hex"))
+				.expect("code is hardcoded, thus valid; qed")
+		});
 	let sub_initial_authorities_set_id = matches
 		.value_of("sub-authorities-set-id")
 		.map(|set| {
@@ -281,7 +292,9 @@ fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result<Ethereu
 	Ok(params)
 }
 
-fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<EthereumExchangeSubmitParams, String> {
+fn ethereum_exchange_submit_params(
+	matches: &clap::ArgMatches,
+) -> Result<EthereumExchangeSubmitParams, String> {
 	let eth_nonce = matches
 		.value_of("eth-nonce")
 		.map(|eth_nonce| {
@@ -293,9 +306,7 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<Ethereu
 	let eth_amount = matches
 		.value_of("eth-amount")
 		.map(|eth_amount| {
-			eth_amount
-				.parse()
-				.map_err(|e| format!("Failed to parse eth-amount: {}", e))
+			eth_amount.parse().map_err(|e| format!("Failed to parse eth-amount: {}", e))
 		})
 		.transpose()?
 		.unwrap_or_else(|| {
@@ -304,7 +315,8 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<Ethereu
 		});
 
 	// This is the well-known Substrate account of Ferdie
-	let default_recepient = hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c");
+	let default_recepient =
+		hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c");
 
 	let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") {
 		hex::decode(&sub_recipient)
@@ -340,9 +352,7 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<Ethereu
 fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result<EthereumExchangeParams, String> {
 	let mode = match matches.value_of("eth-tx-hash") {
 		Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single(
-			eth_tx_hash
-				.parse()
-				.map_err(|e| format!("Failed to parse eth-tx-hash: {}", e))?,
+			eth_tx_hash.parse().map_err(|e| format!("Failed to parse eth-tx-hash: {}", e))?,
 		),
 		None => ethereum_exchange::ExchangeRelayMode::Auto(
 			matches
@@ -372,7 +382,7 @@ fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result<EthereumExchan
 
 fn metrics_params(matches: &clap::ArgMatches) -> Result<MetricsParams, String> {
 	if matches.is_present("no-prometheus") {
-		return Ok(None.into());
+		return Ok(None.into())
 	}
 
 	let mut metrics_params = MetricsAddress::default();
@@ -405,9 +415,8 @@ fn instance_params(matches: &clap::ArgMatches) -> Result<Arc<dyn BridgeInstance>
 
 fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> Result<Option<Vec<u8>>, String> {
 	match matches.value_of(arg) {
-		Some(value) => Ok(Some(
-			hex::decode(value).map_err(|e| format!("Failed to parse {}: {}", arg, e))?,
-		)),
+		Some(value) =>
+			Ok(Some(hex::decode(value).map_err(|e| format!("Failed to parse {}: {}", arg, e))?)),
 		None => Ok(None),
 	}
 }
diff --git a/bridges/relays/bin-ethereum/src/rialto_client.rs b/bridges/relays/bin-ethereum/src/rialto_client.rs
index 35518f5e19cc20894ed354d65eda9fc6b8eb82f5..1dadf9f7ddff5a69b650236732566fcb68a26cd3 100644
--- a/bridges/relays/bin-ethereum/src/rialto_client.rs
+++ b/bridges/relays/bin-ethereum/src/rialto_client.rs
@@ -14,9 +14,9 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::ethereum_sync_loop::QueuedEthereumHeader;
-use crate::instances::BridgeInstance;
-use crate::rpc_errors::RpcError;
+use crate::{
+	ethereum_sync_loop::QueuedEthereumHeader, instances::BridgeInstance, rpc_errors::RpcError,
+};
 
 use async_trait::async_trait;
 use bp_eth_poa::AuraHeader as SubstrateEthereumHeader;
@@ -24,7 +24,9 @@ use codec::{Decode, Encode};
 use headers_relay::sync_types::SubmittedHeaders;
 use relay_ethereum_client::types::HeaderId as EthereumHeaderId;
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
-use relay_substrate_client::{Client as SubstrateClient, TransactionSignScheme, UnsignedTransaction};
+use relay_substrate_client::{
+	Client as SubstrateClient, TransactionSignScheme, UnsignedTransaction,
+};
 use relay_utils::HeaderId;
 use sp_core::{crypto::Pair, Bytes};
 use std::{collections::VecDeque, sync::Arc};
@@ -33,7 +35,8 @@ const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_req
 const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block";
 const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block";
 const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block";
-const EXCH_API_FILTER_TRANSACTION_PROOF: &str = "RialtoCurrencyExchangeApi_filter_transaction_proof";
+const EXCH_API_FILTER_TRANSACTION_PROOF: &str =
+	"RialtoCurrencyExchangeApi_filter_transaction_proof";
 
 type RpcResult<T> = std::result::Result<T, RpcError>;
 
@@ -58,7 +61,8 @@ impl SubstrateHighLevelRpc for SubstrateClient<Rialto> {
 		let data = Bytes(Vec::new());
 
 		let encoded_response = self.state_call(call, data, None).await?;
-		let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?;
+		let decoded_response: (u64, bp_eth_poa::H256) =
+			Decode::decode(&mut &encoded_response.0[..])?;
 
 		let best_header_id = HeaderId(decoded_response.0, decoded_response.1);
 		Ok(best_header_id)
@@ -69,7 +73,8 @@ impl SubstrateHighLevelRpc for SubstrateClient<Rialto> {
 		let data = Bytes(Vec::new());
 
 		let encoded_response = self.state_call(call, data, None).await?;
-		let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?;
+		let decoded_response: (u64, bp_eth_poa::H256) =
+			Decode::decode(&mut &encoded_response.0[..])?;
 
 		let best_header_id = HeaderId(decoded_response.0, decoded_response.1);
 		Ok(best_header_id)
@@ -157,17 +162,23 @@ impl SubmitEthereumHeaders for SubstrateClient<Rialto> {
 		let ids = headers.iter().map(|header| header.id()).collect();
 		let genesis_hash = *self.genesis_hash();
 		let submission_result = async {
-			self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), move |_, transaction_nonce| {
-				Bytes(
-					Rialto::sign_transaction(
-						genesis_hash,
-						&params,
-						relay_substrate_client::TransactionEra::immortal(),
-						UnsignedTransaction::new(instance.build_signed_header_call(headers), transaction_nonce),
+			self.submit_signed_extrinsic(
+				(*params.public().as_array_ref()).into(),
+				move |_, transaction_nonce| {
+					Bytes(
+						Rialto::sign_transaction(
+							genesis_hash,
+							&params,
+							relay_substrate_client::TransactionEra::immortal(),
+							UnsignedTransaction::new(
+								instance.build_signed_header_call(headers),
+								transaction_nonce,
+							),
+						)
+						.encode(),
 					)
-					.encode(),
-				)
-			})
+				},
+			)
 			.await?;
 			Ok(())
 		}
@@ -209,8 +220,8 @@ impl SubmitEthereumHeaders for SubstrateClient<Rialto> {
 					submitted_headers.rejected.push(id);
 					submitted_headers.rejected.extend(ids);
 					submitted_headers.fatal_error = Some(error.into());
-					break;
-				}
+					break
+				},
 			}
 		}
 
@@ -259,23 +270,31 @@ impl SubmitEthereumExchangeTransactionProof for SubstrateClient<Rialto> {
 		proof: rialto_runtime::exchange::EthereumTransactionInclusionProof,
 	) -> RpcResult<()> {
 		let genesis_hash = *self.genesis_hash();
-		self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), move |_, transaction_nonce| {
-			Bytes(
-				Rialto::sign_transaction(
-					genesis_hash,
-					&params,
-					relay_substrate_client::TransactionEra::immortal(),
-					UnsignedTransaction::new(instance.build_currency_exchange_call(proof), transaction_nonce),
+		self.submit_signed_extrinsic(
+			(*params.public().as_array_ref()).into(),
+			move |_, transaction_nonce| {
+				Bytes(
+					Rialto::sign_transaction(
+						genesis_hash,
+						&params,
+						relay_substrate_client::TransactionEra::immortal(),
+						UnsignedTransaction::new(
+							instance.build_currency_exchange_call(proof),
+							transaction_nonce,
+						),
+					)
+					.encode(),
 				)
-				.encode(),
-			)
-		})
+			},
+		)
 		.await?;
 		Ok(())
 	}
 }
 
 /// Create unsigned Substrate transaction for submitting Ethereum header.
-fn create_unsigned_submit_transaction(call: rialto_runtime::Call) -> rialto_runtime::UncheckedExtrinsic {
+fn create_unsigned_submit_transaction(
+	call: rialto_runtime::Call,
+) -> rialto_runtime::UncheckedExtrinsic {
 	rialto_runtime::UncheckedExtrinsic::new_unsigned(call)
 }
diff --git a/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs b/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
index 7924661244767c2e3e106d17a684114f0002ea13..4b5bd4fa7326c695bc91faa11e3e5b8831a6970a 100644
--- a/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
+++ b/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
@@ -16,8 +16,7 @@
 
 //! Rialto-Substrate -> Ethereum PoA synchronization.
 
-use crate::ethereum_client::EthereumHighLevelRpc;
-use crate::rpc_errors::RpcError;
+use crate::{ethereum_client::EthereumHighLevelRpc, rpc_errors::RpcError};
 
 use async_trait::async_trait;
 use codec::Encode;
@@ -38,8 +37,7 @@ use relay_substrate_client::{
 use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient};
 use sp_runtime::EncodedJustification;
 
-use std::fmt::Debug;
-use std::{collections::HashSet, time::Duration};
+use std::{collections::HashSet, fmt::Debug, time::Duration};
 
 pub mod consts {
 	use super::*;
@@ -50,7 +48,8 @@ pub mod consts {
 	pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8;
 	/// Max Ethereum headers count we want to have in 'submitted' state.
 	pub const MAX_SUBMITTED_HEADERS: usize = 4;
-	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned).
+	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten
+	/// (pruned).
 	pub const PRUNE_DEPTH: u32 = 256;
 }
 
@@ -110,11 +109,7 @@ struct EthereumHeadersTarget {
 
 impl EthereumHeadersTarget {
 	fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self {
-		Self {
-			client,
-			contract,
-			sign_params,
-		}
+		Self { client, contract, sign_params }
 	}
 }
 
@@ -137,11 +132,17 @@ impl TargetClient<SubstrateHeadersSyncPipeline> for EthereumHeadersTarget {
 		self.client.best_substrate_block(self.contract).await
 	}
 
-	async fn is_known_header(&self, id: RialtoHeaderId) -> Result<(RialtoHeaderId, bool), RpcError> {
+	async fn is_known_header(
+		&self,
+		id: RialtoHeaderId,
+	) -> Result<(RialtoHeaderId, bool), RpcError> {
 		self.client.substrate_header_known(self.contract, id).await
 	}
 
-	async fn submit_headers(&self, headers: Vec<QueuedRialtoHeader>) -> SubmittedHeaders<RialtoHeaderId, RpcError> {
+	async fn submit_headers(
+		&self,
+		headers: Vec<QueuedRialtoHeader>,
+	) -> SubmittedHeaders<RialtoHeaderId, RpcError> {
 		self.client
 			.submit_substrate_headers(self.sign_params.clone(), self.contract, headers)
 			.await
@@ -161,7 +162,10 @@ impl TargetClient<SubstrateHeadersSyncPipeline> for EthereumHeadersTarget {
 			.await
 	}
 
-	async fn requires_extra(&self, header: QueuedRialtoHeader) -> Result<(RialtoHeaderId, bool), RpcError> {
+	async fn requires_extra(
+		&self,
+		header: QueuedRialtoHeader,
+	) -> Result<(RialtoHeaderId, bool), RpcError> {
 		Ok((header.header().id(), false))
 	}
 }
diff --git a/bridges/relays/bin-ethereum/src/substrate_types.rs b/bridges/relays/bin-ethereum/src/substrate_types.rs
index af68d7e0285557d4fc0edfd753dd1771c6a49e69..f9e6c29c6a65022f52cc6c8d35bf3fd29e5941b2 100644
--- a/bridges/relays/bin-ethereum/src/substrate_types.rs
+++ b/bridges/relays/bin-ethereum/src/substrate_types.rs
@@ -17,11 +17,12 @@
 //! Converting between Ethereum headers and bridge module types.
 
 use bp_eth_poa::{
-	AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, Receipt as SubstrateEthereumReceipt,
-	TransactionOutcome as SubstrateEthereumTransactionOutcome,
+	AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry,
+	Receipt as SubstrateEthereumReceipt, TransactionOutcome as SubstrateEthereumTransactionOutcome,
 };
 use relay_ethereum_client::types::{
-	Header as EthereumHeader, Receipt as EthereumReceipt, HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF,
+	Header as EthereumHeader, Receipt as EthereumReceipt,
+	HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF,
 };
 
 /// Convert Ethereum header into Ethereum header for Substrate.
@@ -68,7 +69,8 @@ pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEt
 			})
 			.collect(),
 		outcome: match (receipt.status, receipt.root) {
-			(Some(status), None) => SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8),
+			(Some(status), None) =>
+				SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8),
 			(None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root),
 			_ => SubstrateEthereumTransactionOutcome::Unknown,
 		},
diff --git a/bridges/relays/bin-substrate/src/chains/kusama.rs b/bridges/relays/bin-substrate/src/chains/kusama.rs
index 6c7711316626eef5855f2dfd38040250ec68b576..f0c11f6abb83f8c5f360a546c55aca865aa3f9bb 100644
--- a/bridges/relays/bin-substrate/src/chains/kusama.rs
+++ b/bridges/relays/bin-substrate/src/chains/kusama.rs
@@ -41,41 +41,41 @@ impl CliEncodeCall for Kusama {
 
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
-			Call::Remark { remark_payload, .. } => {
-				relay_kusama_client::runtime::Call::System(relay_kusama_client::runtime::SystemCall::remark(
+			Call::Remark { remark_payload, .. } => relay_kusama_client::runtime::Call::System(
+				relay_kusama_client::runtime::SystemCall::remark(
 					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-				))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::KUSAMA_TO_POLKADOT_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					relay_kusama_client::runtime::Call::BridgePolkadotMessages(
-						relay_kusama_client::runtime::BridgePolkadotMessagesCall::send_message(lane.0, payload, fee.0),
-					)
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
 				),
-			},
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::KUSAMA_TO_POLKADOT_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_kusama_client::runtime::Call::BridgePolkadotMessages(
+							relay_kusama_client::runtime::BridgePolkadotMessagesCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 			_ => anyhow::bail!("Unsupported Kusama call: {:?}", call),
 		})
 	}
 
-	fn get_dispatch_info(call: &relay_kusama_client::runtime::Call) -> anyhow::Result<DispatchInfo> {
+	fn get_dispatch_info(
+		call: &relay_kusama_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
 		match *call {
-			relay_kusama_client::runtime::Call::System(relay_kusama_client::runtime::SystemCall::remark(_)) => {
-				Ok(DispatchInfo {
-					weight: crate::chains::kusama::SYSTEM_REMARK_CALL_WEIGHT,
-					class: DispatchClass::Normal,
-					pays_fee: Pays::Yes,
-				})
-			}
+			relay_kusama_client::runtime::Call::System(
+				relay_kusama_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: crate::chains::kusama::SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
 			_ => anyhow::bail!("Unsupported Kusama call: {:?}", call),
 		}
 	}
@@ -95,7 +95,9 @@ impl CliChain for Kusama {
 		bp_kusama::max_extrinsic_weight()
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		Err("Sending messages from Kusama is not yet supported.".into())
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs b/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs
index b3b7d956bcc51136594c805d1de986323809aab6..1e3cd2ec7d889e6ccf291ee9125d387a12e13947 100644
--- a/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs
+++ b/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs
@@ -24,13 +24,15 @@ use relay_kusama_client::{Kusama, SyncHeader as KusamaSyncHeader};
 use relay_polkadot_client::{Polkadot, SigningParams as PolkadotSigningParams};
 use relay_substrate_client::{Client, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat
 /// relay as gone wild.
 ///
-/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 21 DOT,
-/// but let's round up to 30 DOT here.
+/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 21
+/// DOT, but let's round up to 30 DOT here.
 pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 30_000_000_000;
 
 /// Kusama-to-Polkadot finality sync pipeline.
@@ -45,7 +47,10 @@ pub(crate) struct KusamaFinalityToPolkadot {
 impl KusamaFinalityToPolkadot {
 	pub fn new(target_client: Client<Polkadot>, target_sign: PolkadotSigningParams) -> Self {
 		Self {
-			finality_pipeline: FinalityPipelineKusamaFinalityToPolkadot::new(target_client, target_sign),
+			finality_pipeline: FinalityPipelineKusamaFinalityToPolkadot::new(
+				target_client,
+				target_sign,
+			),
 		}
 	}
 }
@@ -53,7 +58,8 @@ impl KusamaFinalityToPolkadot {
 impl SubstrateFinalitySyncPipeline for KusamaFinalityToPolkadot {
 	type FinalitySyncPipeline = FinalityPipelineKusamaFinalityToPolkadot;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
 
 	type TargetChain = Polkadot;
 
@@ -116,29 +122,36 @@ pub(crate) mod tests {
 		B: From<u32> + std::ops::Mul<Output = B>,
 		W: WeightToFeePolynomial<Balance = B>,
 	{
-		// we assume that the GRANDPA is not lagging here => ancestry length will be near to 0 (let's round up to 2)
+		// we assume that the GRANDPA is not lagging here => ancestry length will be near to 0
+		// (let's round up to 2)
 		const AVG_VOTES_ANCESTRIES_LEN: u32 = 2;
-		// let's assume number of validators is 1024 (more than on any existing well-known chain atm)
-		// => number of precommits is *2/3 + 1
+		// let's assume number of validators is 1024 (more than on any existing well-known chain
+		// atm) => number of precommits is *2/3 + 1
 		const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1;
 
 		// GRANDPA pallet weights. We're now using Rialto weights everywhere.
 		//
-		// Using Rialto runtime is slightly incorrect, because `DbWeight` of other runtimes may differ
-		// from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is the same.
-		type GrandpaPalletWeights = pallet_bridge_grandpa::weights::RialtoWeight<rialto_runtime::Runtime>;
+		// Using Rialto runtime is slightly incorrect, because `DbWeight` of other runtimes may
+		// differ from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is
+		// the same.
+		type GrandpaPalletWeights =
+			pallet_bridge_grandpa::weights::RialtoWeight<rialto_runtime::Runtime>;
 
-		// The following formula shall not be treated as super-accurate - guard is to protect from mad relays,
-		// not to protect from over-average loses.
+		// The following formula shall not be treated as super-accurate - guard is to protect from
+		// mad relays, not to protect from over-average loses.
 
 		// increase number of headers a bit
 		let expected_source_headers_per_day = expected_source_headers_per_day * 110 / 100;
-		let single_source_header_submit_call_weight =
-			GrandpaPalletWeights::submit_finality_proof(AVG_VOTES_ANCESTRIES_LEN, AVG_PRECOMMITS_LEN);
-		// for simplicity - add extra weight for base tx fee + fee that is paid for the tx size + adjusted fee
+		let single_source_header_submit_call_weight = GrandpaPalletWeights::submit_finality_proof(
+			AVG_VOTES_ANCESTRIES_LEN,
+			AVG_PRECOMMITS_LEN,
+		);
+		// for simplicity - add extra weight for base tx fee + fee that is paid for the tx size +
+		// adjusted fee
 		let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2;
 		let single_source_header_tx_cost = W::calc(&single_source_header_submit_tx_weight);
-		let maximal_expected_decrease = single_source_header_tx_cost * B::from(expected_source_headers_per_day);
+		let maximal_expected_decrease =
+			single_source_header_tx_cost * B::from(expected_source_headers_per_day);
 
 		maximal_expected_decrease
 	}
diff --git a/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs b/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs
index 5b07e6d588a6c8b603d5227663c99b45d8e6b1e9..e006532d978a4a44f1754bab5af91c96c6253612 100644
--- a/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs
+++ b/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs
@@ -25,17 +25,23 @@ use bp_messages::MessageNonce;
 use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use frame_support::weights::Weight;
 use messages_relay::message_lane::MessageLane;
-use relay_kusama_client::{HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams};
-use relay_polkadot_client::{HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams};
+use relay_kusama_client::{
+	HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams,
+};
+use relay_polkadot_client::{
+	HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams,
+};
 use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use sp_runtime::{FixedPointNumber, FixedU128};
-use substrate_relay_helper::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, SubstrateMessageLane,
-	SubstrateMessageLaneToSubstrate,
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
 };
-use substrate_relay_helper::messages_source::SubstrateMessagesSource;
-use substrate_relay_helper::messages_target::SubstrateMessagesTarget;
 
 /// Kusama-to-Polkadot message lane.
 pub type MessageLaneKusamaMessagesToPolkadot =
@@ -49,24 +55,32 @@ pub struct KusamaMessagesToPolkadot {
 impl SubstrateMessageLane for KusamaMessagesToPolkadot {
 	type MessageLane = MessageLaneKusamaMessagesToPolkadot;
 
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_polkadot::TO_POLKADOT_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_polkadot::TO_POLKADOT_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_polkadot::TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD;
 	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
 		bp_polkadot::TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_kusama::FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_kusama::FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_kusama::FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_kusama::FROM_KUSAMA_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_kusama::FROM_KUSAMA_UNREWARDED_RELAYERS_STATE;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
 
-	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME;
-	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str =
+		bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str =
+		bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME;
 
-	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = bp_polkadot::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_polkadot::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Kusama;
 	type TargetChain = Polkadot;
@@ -117,11 +131,7 @@ impl SubstrateMessageLane for KusamaMessagesToPolkadot {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 
 		let call = relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
@@ -180,14 +190,14 @@ pub async fn run(
 	// we don't know exact weights of the Polkadot runtime. So to guess weights we'll be using
 	// weights from Rialto and then simply dividing it by x2.
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_polkadot::max_extrinsic_weight(),
 			bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
-	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (
-		max_messages_in_single_batch / 2,
-		max_messages_weight_in_single_batch / 2,
-	);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
 
 	log::info!(
 		target: "bridge",
@@ -219,8 +229,10 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_polkadot::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_polkadot::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
@@ -252,8 +264,10 @@ pub(crate) fn add_standalone_metrics(
 	metrics_params: MetricsParams,
 	source_client: Client<Kusama>,
 ) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
-	let polkadot_to_kusama_conversion_rate_key =
-		bp_runtime::storage_parameter_key(bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME).0;
+	let polkadot_to_kusama_conversion_rate_key = bp_runtime::storage_parameter_key(
+		bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME,
+	)
+	.0;
 
 	substrate_relay_helper::messages_lane::add_standalone_metrics::<KusamaMessagesToPolkadot>(
 		metrics_prefix,
diff --git a/bridges/relays/bin-substrate/src/chains/millau.rs b/bridges/relays/bin-substrate/src/chains/millau.rs
index 7a86455df939fd88ba05f671c297d44c9b8d8fd4..004e4c740a4737a2e4e25dbb7098ad6f4db30e3e 100644
--- a/bridges/relays/bin-substrate/src/chains/millau.rs
+++ b/bridges/relays/bin-substrate/src/chains/millau.rs
@@ -37,31 +37,26 @@ impl CliEncodeCall for Millau {
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
 			Call::Raw { data } => Decode::decode(&mut &*data.0)?,
-			Call::Remark { remark_payload, .. } => millau_runtime::Call::System(millau_runtime::SystemCall::remark(
-				remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-			)),
+			Call::Remark { remark_payload, .. } =>
+				millau_runtime::Call::System(millau_runtime::SystemCall::remark(
+					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				)),
 			Call::Transfer { recipient, amount } => millau_runtime::Call::Balances(
 				millau_runtime::BalancesCall::transfer(recipient.raw_id(), amount.cast()),
 			),
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::MILLAU_TO_RIALTO_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					millau_runtime::Call::BridgeRialtoMessages(millau_runtime::MessagesCall::send_message(
-						lane.0,
-						payload,
-						fee.cast(),
-					))
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
-				),
-			},
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::MILLAU_TO_RIALTO_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						millau_runtime::Call::BridgeRialtoMessages(
+							millau_runtime::MessagesCall::send_message(lane.0, payload, fee.cast()),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 		})
 	}
 
@@ -74,7 +69,12 @@ impl CliChain for Millau {
 	const RUNTIME_VERSION: RuntimeVersion = millau_runtime::VERSION;
 
 	type KeyPair = sp_core::sr25519::Pair;
-	type MessagePayload = MessagePayload<bp_millau::AccountId, bp_rialto::AccountSigner, bp_rialto::Signature, Vec<u8>>;
+	type MessagePayload = MessagePayload<
+		bp_millau::AccountId,
+		bp_rialto::AccountSigner,
+		bp_rialto::Signature,
+		Vec<u8>,
+	>;
 
 	fn ss58_format() -> u16 {
 		millau_runtime::SS58Prefix::get() as u16
@@ -85,7 +85,9 @@ impl CliChain for Millau {
 	}
 
 	// TODO [#854|#843] support multiple bridges?
-	fn encode_message(message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		match message {
 			encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0)
 				.map_err(|e| format!("Failed to decode Millau's MessagePayload: {:?}", e)),
@@ -96,7 +98,10 @@ impl CliChain for Millau {
 				sender.enforce_chain::<Source>();
 				let spec_version = Target::RUNTIME_VERSION.spec_version;
 				let origin = CallOrigin::SourceAccount(sender.raw_id());
-				encode_call::preprocess_call::<Source, Target>(&mut call, bridge::MILLAU_TO_RIALTO_INDEX);
+				encode_call::preprocess_call::<Source, Target>(
+					&mut call,
+					bridge::MILLAU_TO_RIALTO_INDEX,
+				);
 				let call = Target::encode_call(&call).map_err(|e| e.to_string())?;
 				let weight = call.get_dispatch_info().weight;
 
@@ -107,7 +112,7 @@ impl CliChain for Millau {
 					&call,
 					DispatchFeePayment::AtSourceChain,
 				))
-			}
+			},
 		}
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs
index f0ea225485bd468f26073d2599ba12c7766bf1b2..e1dc19594f4f2e1f233e404b956300b2e81fc3cd 100644
--- a/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs
+++ b/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs
@@ -23,10 +23,13 @@ use bp_header_chain::justification::GrandpaJustification;
 use relay_millau_client::{Millau, SyncHeader as MillauSyncHeader};
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
 use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Millau-to-Rialto finality sync pipeline.
-pub(crate) type FinalityPipelineMillauToRialto = SubstrateFinalityToSubstrate<Millau, Rialto, RialtoSigningParams>;
+pub(crate) type FinalityPipelineMillauToRialto =
+	SubstrateFinalityToSubstrate<Millau, Rialto, RialtoSigningParams>;
 
 #[derive(Clone, Debug)]
 pub(crate) struct MillauFinalityToRialto {
@@ -35,16 +38,15 @@ pub(crate) struct MillauFinalityToRialto {
 
 impl MillauFinalityToRialto {
 	pub fn new(target_client: Client<Rialto>, target_sign: RialtoSigningParams) -> Self {
-		Self {
-			finality_pipeline: FinalityPipelineMillauToRialto::new(target_client, target_sign),
-		}
+		Self { finality_pipeline: FinalityPipelineMillauToRialto::new(target_client, target_sign) }
 	}
 }
 
 impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto {
 	type FinalitySyncPipeline = FinalityPipelineMillauToRialto;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
 
 	type TargetChain = Rialto;
 
@@ -59,8 +61,11 @@ impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto {
 		header: MillauSyncHeader,
 		proof: GrandpaJustification<bp_millau::Header>,
 	) -> Bytes {
-		let call =
-			rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof(Box::new(header.into_inner()), proof).into();
+		let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof(
+			Box::new(header.into_inner()),
+			proof,
+		)
+		.into();
 
 		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
 		let transaction = Rialto::sign_transaction(
diff --git a/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs
index b0c63ff088365fabe3450048740c0dcbf1066352..7682f32e608e433b86c95becc70642108e726295 100644
--- a/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs
+++ b/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs
@@ -26,16 +26,22 @@ use bp_messages::MessageNonce;
 use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use frame_support::weights::Weight;
 use messages_relay::message_lane::MessageLane;
-use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams};
-use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams};
+use relay_millau_client::{
+	HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams,
+};
+use relay_rialto_client::{
+	HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams,
+};
 use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
-use substrate_relay_helper::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, SubstrateMessageLane,
-	SubstrateMessageLaneToSubstrate,
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
 };
-use substrate_relay_helper::messages_source::SubstrateMessagesSource;
-use substrate_relay_helper::messages_target::SubstrateMessagesTarget;
 
 /// Millau-to-Rialto message lane.
 pub type MessageLaneMillauMessagesToRialto =
@@ -49,23 +55,30 @@ pub struct MillauMessagesToRialto {
 impl SubstrateMessageLane for MillauMessagesToRialto {
 	type MessageLane = MessageLaneMillauMessagesToRialto;
 
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_rialto::TO_RIALTO_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_millau::FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
 
 	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME;
 	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME;
 
-	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Millau;
 	type TargetChain = Rialto;
@@ -82,7 +95,8 @@ impl SubstrateMessageLane for MillauMessagesToRialto {
 	) -> Bytes {
 		let (relayers_state, proof) = proof;
 		let call: millau_runtime::Call =
-			millau_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into();
+			millau_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state)
+				.into();
 		let call_weight = call.get_dispatch_info().weight;
 		let genesis_hash = *self.message_lane.source_client.genesis_hash();
 		let transaction = Millau::sign_transaction(
@@ -114,11 +128,7 @@ impl SubstrateMessageLane for MillauMessagesToRialto {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 		let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof(
 			self.message_lane.relayer_id_at_source.clone(),
@@ -176,7 +186,9 @@ pub async fn run(
 	let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() / 3;
 	// TODO: use Millau weights after https://github.com/paritytech/parity-bridges-common/issues/390
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<millau_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<millau_runtime::Runtime>,
+		>(
 			bp_rialto::max_extrinsic_weight(),
 			bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
@@ -211,8 +223,10 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
@@ -251,7 +265,9 @@ pub(crate) fn add_standalone_metrics(
 		Some(crate::chains::MILLAU_ASSOCIATED_TOKEN_ID),
 		Some(crate::chains::RIALTO_ASSOCIATED_TOKEN_ID),
 		Some((
-			sp_core::storage::StorageKey(millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec()),
+			sp_core::storage::StorageKey(
+				millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(),
+			),
 			millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE,
 		)),
 	)
diff --git a/bridges/relays/bin-substrate/src/chains/mod.rs b/bridges/relays/bin-substrate/src/chains/mod.rs
index bdc4f2628ee56ce2f22a4f4b6c1164f3eaec3871..ea7c9754ec72ab1091eb7fcc1478a9f2a70c292c 100644
--- a/bridges/relays/bin-substrate/src/chains/mod.rs
+++ b/bridges/relays/bin-substrate/src/chains/mod.rs
@@ -38,9 +38,9 @@ mod rococo;
 mod westend;
 mod wococo;
 
-// Millau/Rialto tokens have no any real value, so the conversion rate we use is always 1:1. But we want to
-// test our code that is intended to work with real-value chains. So to keep it close to 1:1, we'll be treating
-// Rialto as BTC and Millau as wBTC (only in relayer).
+// Millau/Rialto tokens have no real value, so the conversion rate we use is always 1:1. But we
+// want to test our code that is intended to work with real-value chains. So to keep it close to
+// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer).
 
 /// The identifier of token, which value is associated with Rialto token value by relayer.
 pub(crate) const RIALTO_ASSOCIATED_TOKEN_ID: &str = polkadot::TOKEN_ID;
@@ -53,8 +53,8 @@ pub(crate) fn add_polkadot_kusama_price_metrics<T: finality_relay::FinalitySyncP
 	prefix: Option<String>,
 	params: MetricsParams,
 ) -> anyhow::Result<MetricsParams> {
-	// Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <-> Kusama
-	// relays, but we want to test metrics/dashboards in advance
+	// Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <->
+	// Kusama relays, but we want to test metrics/dashboards in advance
 	Ok(relay_utils::relay_metrics(prefix, params)
 		.standalone_metric(|registry, prefix| {
 			substrate_relay_helper::helpers::token_price_metric(registry, prefix, "polkadot")
@@ -92,7 +92,8 @@ mod tests {
 			rialto_runtime::VERSION.spec_version,
 		);
 
-		let rialto_signer = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap();
+		let rialto_signer =
+			relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap();
 		let signature = rialto_signer.sign(&digest);
 
 		assert!(signature.verify(&digest[..], &rialto_signer.public()));
@@ -113,7 +114,8 @@ mod tests {
 			millau_runtime::VERSION.spec_version,
 		);
 
-		let millau_signer = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap();
+		let millau_signer =
+			relay_millau_client::SigningParams::from_string("//Dave", None).unwrap();
 		let signature = millau_signer.sign(&digest);
 
 		assert!(signature.verify(&digest[..], &millau_signer.public()));
@@ -128,7 +130,8 @@ mod tests {
 			bp_millau::max_extrinsic_size(),
 		);
 
-		let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into();
+		let call: millau_runtime::Call =
+			millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into();
 		let payload = send_message::message_payload(
 			Default::default(),
 			call.get_dispatch_info().weight,
@@ -164,8 +167,9 @@ mod tests {
 	fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() {
 		use rialto_runtime::millau_messages::Millau;
 
-		let maximal_dispatch_weight =
-			send_message::compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight());
+		let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight(
+			bp_millau::max_extrinsic_weight(),
+		);
 		let call: millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into();
 
 		let payload = send_message::message_payload(
@@ -191,8 +195,9 @@ mod tests {
 	fn maximal_weight_fill_block_to_rialto_is_generated_correctly() {
 		use millau_runtime::rialto_messages::Rialto;
 
-		let maximal_dispatch_weight =
-			send_message::compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight());
+		let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight(
+			bp_rialto::max_extrinsic_weight(),
+		);
 		let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into();
 
 		let payload = send_message::message_payload(
@@ -325,7 +330,10 @@ mod westend_tests {
 			votes_ancestries: vec![],
 		};
 
-		let actual = bp_westend::BridgeGrandpaRococoCall::submit_finality_proof(header.clone(), justification.clone());
+		let actual = bp_westend::BridgeGrandpaRococoCall::submit_finality_proof(
+			header.clone(),
+			justification.clone(),
+		);
 		let expected = millau_runtime::BridgeGrandpaRialtoCall::<millau_runtime::Runtime>::submit_finality_proof(
 			Box::new(header),
 			justification,
diff --git a/bridges/relays/bin-substrate/src/chains/polkadot.rs b/bridges/relays/bin-substrate/src/chains/polkadot.rs
index 372bdb90efcb4f21393c2de2e87d24bb3f33815d..6baeb50c14a35e3ad3665a6a4ab10438dc447ff1 100644
--- a/bridges/relays/bin-substrate/src/chains/polkadot.rs
+++ b/bridges/relays/bin-substrate/src/chains/polkadot.rs
@@ -41,41 +41,41 @@ impl CliEncodeCall for Polkadot {
 
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
-			Call::Remark { remark_payload, .. } => {
-				relay_polkadot_client::runtime::Call::System(relay_polkadot_client::runtime::SystemCall::remark(
+			Call::Remark { remark_payload, .. } => relay_polkadot_client::runtime::Call::System(
+				relay_polkadot_client::runtime::SystemCall::remark(
 					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-				))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::POLKADOT_TO_KUSAMA_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
-						relay_polkadot_client::runtime::BridgeKusamaMessagesCall::send_message(lane.0, payload, fee.0),
-					)
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
 				),
-			},
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::POLKADOT_TO_KUSAMA_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
+							relay_polkadot_client::runtime::BridgeKusamaMessagesCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 			_ => anyhow::bail!("Unsupported Polkadot call: {:?}", call),
 		})
 	}
 
-	fn get_dispatch_info(call: &relay_polkadot_client::runtime::Call) -> anyhow::Result<DispatchInfo> {
+	fn get_dispatch_info(
+		call: &relay_polkadot_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
 		match *call {
-			relay_polkadot_client::runtime::Call::System(relay_polkadot_client::runtime::SystemCall::remark(_)) => {
-				Ok(DispatchInfo {
-					weight: crate::chains::polkadot::SYSTEM_REMARK_CALL_WEIGHT,
-					class: DispatchClass::Normal,
-					pays_fee: Pays::Yes,
-				})
-			}
+			relay_polkadot_client::runtime::Call::System(
+				relay_polkadot_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: crate::chains::polkadot::SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
 			_ => anyhow::bail!("Unsupported Polkadot call: {:?}", call),
 		}
 	}
@@ -95,7 +95,9 @@ impl CliChain for Polkadot {
 		bp_polkadot::max_extrinsic_weight()
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		Err("Sending messages from Polkadot is not yet supported.".into())
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs b/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs
index c225b77d546aa743f6ebac0055488ed98395b2dd..603d5ba3aa779d9b2b1988ab4622b1e38ff47a5a 100644
--- a/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs
+++ b/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs
@@ -24,13 +24,15 @@ use relay_kusama_client::{Kusama, SigningParams as KusamaSigningParams};
 use relay_polkadot_client::{Polkadot, SyncHeader as PolkadotSyncHeader};
 use relay_substrate_client::{Client, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat
 /// relay as gone wild.
 ///
-/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 0.001 KSM,
-/// but let's round up to 0.1 KSM here.
+/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 0.001
+/// KSM, but let's round up to 0.1 KSM here.
 pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 100_000_000_000;
 
 /// Polkadot-to-Kusama finality sync pipeline.
@@ -45,7 +47,10 @@ pub(crate) struct PolkadotFinalityToKusama {
 impl PolkadotFinalityToKusama {
 	pub fn new(target_client: Client<Kusama>, target_sign: KusamaSigningParams) -> Self {
 		Self {
-			finality_pipeline: FinalityPipelinePolkadotFinalityToKusama::new(target_client, target_sign),
+			finality_pipeline: FinalityPipelinePolkadotFinalityToKusama::new(
+				target_client,
+				target_sign,
+			),
 		}
 	}
 }
@@ -53,7 +58,8 @@ impl PolkadotFinalityToKusama {
 impl SubstrateFinalitySyncPipeline for PolkadotFinalityToKusama {
 	type FinalitySyncPipeline = FinalityPipelinePolkadotFinalityToKusama;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
 
 	type TargetChain = Kusama;
 
diff --git a/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs b/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs
index 092a02598d1b42a70c0227c0c16a9a0ba7c70129..71943af44da366a1cc5f48025415b762d531c5ce 100644
--- a/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs
+++ b/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs
@@ -25,17 +25,23 @@ use bp_messages::MessageNonce;
 use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use frame_support::weights::Weight;
 use messages_relay::message_lane::MessageLane;
-use relay_kusama_client::{HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams};
-use relay_polkadot_client::{HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams};
+use relay_kusama_client::{
+	HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams,
+};
+use relay_polkadot_client::{
+	HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams,
+};
 use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use sp_runtime::{FixedPointNumber, FixedU128};
-use substrate_relay_helper::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, SubstrateMessageLane,
-	SubstrateMessageLaneToSubstrate,
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
 };
-use substrate_relay_helper::messages_source::SubstrateMessagesSource;
-use substrate_relay_helper::messages_target::SubstrateMessagesTarget;
 
 /// Polkadot-to-Kusama message lane.
 pub type MessageLanePolkadotMessagesToKusama =
@@ -48,24 +54,32 @@ pub struct PolkadotMessagesToKusama {
 
 impl SubstrateMessageLane for PolkadotMessagesToKusama {
 	type MessageLane = MessageLanePolkadotMessagesToKusama;
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_kusama::TO_KUSAMA_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_kusama::TO_KUSAMA_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_kusama::TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_kusama::TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_kusama::TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD;
 
 	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
 		bp_polkadot::FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_polkadot::FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_polkadot::FROM_POLKADOT_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_polkadot::FROM_POLKADOT_UNREWARDED_RELAYERS_STATE;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
 
-	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME;
-	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str =
+		bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str =
+		bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME;
 
-	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = bp_kusama::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_kusama::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Polkadot;
 	type TargetChain = Kusama;
@@ -116,11 +130,7 @@ impl SubstrateMessageLane for PolkadotMessagesToKusama {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 
 		let call = relay_kusama_client::runtime::Call::BridgePolkadotMessages(
@@ -179,14 +189,14 @@ pub async fn run(
 	// we don't know exact weights of the Kusama runtime. So to guess weights we'll be using
 	// weights from Rialto and then simply dividing it by x2.
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_kusama::max_extrinsic_weight(),
 			bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
-	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (
-		max_messages_in_single_batch / 2,
-		max_messages_weight_in_single_batch / 2,
-	);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
 
 	log::info!(
 		target: "bridge",
@@ -218,8 +228,10 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_kusama::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_kusama::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
@@ -251,8 +263,10 @@ pub(crate) fn add_standalone_metrics(
 	metrics_params: MetricsParams,
 	source_client: Client<Polkadot>,
 ) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
-	let kusama_to_polkadot_conversion_rate_key =
-		bp_runtime::storage_parameter_key(bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME).0;
+	let kusama_to_polkadot_conversion_rate_key = bp_runtime::storage_parameter_key(
+		bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME,
+	)
+	.0;
 
 	substrate_relay_helper::messages_lane::add_standalone_metrics::<PolkadotMessagesToKusama>(
 		metrics_prefix,
diff --git a/bridges/relays/bin-substrate/src/chains/rialto.rs b/bridges/relays/bin-substrate/src/chains/rialto.rs
index 2e27342baa3eebb23e184e2ce906d3800e3d4978..0575896d97fdb37b96829ac088a66d0465fdcafd 100644
--- a/bridges/relays/bin-substrate/src/chains/rialto.rs
+++ b/bridges/relays/bin-substrate/src/chains/rialto.rs
@@ -37,29 +37,26 @@ impl CliEncodeCall for Rialto {
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
 			Call::Raw { data } => Decode::decode(&mut &*data.0)?,
-			Call::Remark { remark_payload, .. } => rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(
-				remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-			)),
+			Call::Remark { remark_payload, .. } =>
+				rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(
+					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				)),
 			Call::Transfer { recipient, amount } => rialto_runtime::Call::Balances(
 				rialto_runtime::BalancesCall::transfer(recipient.raw_id().into(), amount.0),
 			),
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::RIALTO_TO_MILLAU_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					rialto_runtime::Call::BridgeMillauMessages(rialto_runtime::MessagesCall::send_message(
-						lane.0, payload, fee.0,
-					))
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
-				),
-			},
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::RIALTO_TO_MILLAU_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						rialto_runtime::Call::BridgeMillauMessages(
+							rialto_runtime::MessagesCall::send_message(lane.0, payload, fee.0),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 		})
 	}
 
@@ -72,7 +69,12 @@ impl CliChain for Rialto {
 	const RUNTIME_VERSION: RuntimeVersion = rialto_runtime::VERSION;
 
 	type KeyPair = sp_core::sr25519::Pair;
-	type MessagePayload = MessagePayload<bp_rialto::AccountId, bp_millau::AccountSigner, bp_millau::Signature, Vec<u8>>;
+	type MessagePayload = MessagePayload<
+		bp_rialto::AccountId,
+		bp_millau::AccountSigner,
+		bp_millau::Signature,
+		Vec<u8>,
+	>;
 
 	fn ss58_format() -> u16 {
 		rialto_runtime::SS58Prefix::get() as u16
@@ -82,7 +84,9 @@ impl CliChain for Rialto {
 		bp_rialto::max_extrinsic_weight()
 	}
 
-	fn encode_message(message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		match message {
 			encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0)
 				.map_err(|e| format!("Failed to decode Rialto's MessagePayload: {:?}", e)),
@@ -93,7 +97,10 @@ impl CliChain for Rialto {
 				sender.enforce_chain::<Source>();
 				let spec_version = Target::RUNTIME_VERSION.spec_version;
 				let origin = CallOrigin::SourceAccount(sender.raw_id());
-				encode_call::preprocess_call::<Source, Target>(&mut call, bridge::RIALTO_TO_MILLAU_INDEX);
+				encode_call::preprocess_call::<Source, Target>(
+					&mut call,
+					bridge::RIALTO_TO_MILLAU_INDEX,
+				);
 				let call = Target::encode_call(&call).map_err(|e| e.to_string())?;
 				let weight = call.get_dispatch_info().weight;
 
@@ -104,7 +111,7 @@ impl CliChain for Rialto {
 					&call,
 					DispatchFeePayment::AtSourceChain,
 				))
-			}
+			},
 		}
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs
index df0f17d94431af702255e5b4eef56bef072c5ed5..79ce6160b87f6fcf776282cb2b9ddf1e790a36c0 100644
--- a/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs
+++ b/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs
@@ -23,7 +23,9 @@ use bp_header_chain::justification::GrandpaJustification;
 use relay_millau_client::{Millau, SigningParams as MillauSigningParams};
 use relay_rialto_client::{Rialto, SyncHeader as RialtoSyncHeader};
 use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Rialto-to-Millau finality sync pipeline.
 pub(crate) type FinalityPipelineRialtoFinalityToMillau =
@@ -37,7 +39,10 @@ pub struct RialtoFinalityToMillau {
 impl RialtoFinalityToMillau {
 	pub fn new(target_client: Client<Millau>, target_sign: MillauSigningParams) -> Self {
 		Self {
-			finality_pipeline: FinalityPipelineRialtoFinalityToMillau::new(target_client, target_sign),
+			finality_pipeline: FinalityPipelineRialtoFinalityToMillau::new(
+				target_client,
+				target_sign,
+			),
 		}
 	}
 }
@@ -45,7 +50,8 @@ impl RialtoFinalityToMillau {
 impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau {
 	type FinalitySyncPipeline = FinalityPipelineRialtoFinalityToMillau;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
 
 	type TargetChain = Millau;
 
diff --git a/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs
index 0196329fb812dcbd45dfa0c2eb30d875ed06b0a5..529f14b84a3da9b2c49baa7e54371ba5d937aca6 100644
--- a/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs
+++ b/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs
@@ -26,16 +26,22 @@ use bp_messages::MessageNonce;
 use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use frame_support::weights::Weight;
 use messages_relay::message_lane::MessageLane;
-use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams};
-use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams};
+use relay_millau_client::{
+	HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams,
+};
+use relay_rialto_client::{
+	HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams,
+};
 use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
-use substrate_relay_helper::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, SubstrateMessageLane,
-	SubstrateMessageLaneToSubstrate,
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
 };
-use substrate_relay_helper::messages_source::SubstrateMessagesSource;
-use substrate_relay_helper::messages_target::SubstrateMessagesTarget;
 
 /// Rialto-to-Millau message lane.
 pub type MessageLaneRialtoMessagesToMillau =
@@ -49,23 +55,30 @@ pub struct RialtoMessagesToMillau {
 impl SubstrateMessageLane for RialtoMessagesToMillau {
 	type MessageLane = MessageLaneRialtoMessagesToMillau;
 
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_millau::TO_MILLAU_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_rialto::FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
 
 	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME;
 	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME;
 
-	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Rialto;
 	type TargetChain = Millau;
@@ -82,7 +95,8 @@ impl SubstrateMessageLane for RialtoMessagesToMillau {
 	) -> Bytes {
 		let (relayers_state, proof) = proof;
 		let call: rialto_runtime::Call =
-			rialto_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into();
+			rialto_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state)
+				.into();
 		let call_weight = call.get_dispatch_info().weight;
 		let genesis_hash = *self.message_lane.source_client.genesis_hash();
 		let transaction = Rialto::sign_transaction(
@@ -114,11 +128,7 @@ impl SubstrateMessageLane for RialtoMessagesToMillau {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 		let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof(
 			self.message_lane.relayer_id_at_source.clone(),
@@ -175,7 +185,9 @@ pub async fn run(
 	// 2/3 is reserved for proofs and tx overhead
 	let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() / 3;
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_millau::max_extrinsic_weight(),
 			bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
@@ -210,8 +222,10 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
@@ -250,7 +264,9 @@ pub(crate) fn add_standalone_metrics(
 		Some(crate::chains::RIALTO_ASSOCIATED_TOKEN_ID),
 		Some(crate::chains::MILLAU_ASSOCIATED_TOKEN_ID),
 		Some((
-			sp_core::storage::StorageKey(rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec()),
+			sp_core::storage::StorageKey(
+				rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(),
+			),
 			rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE,
 		)),
 	)
diff --git a/bridges/relays/bin-substrate/src/chains/rococo.rs b/bridges/relays/bin-substrate/src/chains/rococo.rs
index ec34d9cd33fc7b5bb93957e64bf0b17239c4d2be..ddd0b6cc13b12a02e259f4d376695e6fa34e0a31 100644
--- a/bridges/relays/bin-substrate/src/chains/rococo.rs
+++ b/bridges/relays/bin-substrate/src/chains/rococo.rs
@@ -38,41 +38,41 @@ impl CliEncodeCall for Rococo {
 
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
-			Call::Remark { remark_payload, .. } => {
-				relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark(
+			Call::Remark { remark_payload, .. } => relay_rococo_client::runtime::Call::System(
+				relay_rococo_client::runtime::SystemCall::remark(
 					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-				))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::ROCOCO_TO_WOCOCO_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					relay_rococo_client::runtime::Call::BridgeMessagesWococo(
-						relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message(lane.0, payload, fee.0),
-					)
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
 				),
-			},
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::ROCOCO_TO_WOCOCO_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_rococo_client::runtime::Call::BridgeMessagesWococo(
+							relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 			_ => anyhow::bail!("The call is not supported"),
 		})
 	}
 
-	fn get_dispatch_info(call: &relay_rococo_client::runtime::Call) -> anyhow::Result<DispatchInfo> {
+	fn get_dispatch_info(
+		call: &relay_rococo_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
 		match *call {
-			relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark(_)) => {
-				Ok(DispatchInfo {
-					weight: SYSTEM_REMARK_CALL_WEIGHT,
-					class: DispatchClass::Normal,
-					pays_fee: Pays::Yes,
-				})
-			}
+			relay_rococo_client::runtime::Call::System(
+				relay_rococo_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
 			_ => anyhow::bail!("Unsupported Rococo call: {:?}", call),
 		}
 	}
@@ -92,7 +92,9 @@ impl CliChain for Rococo {
 		bp_wococo::max_extrinsic_weight()
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		Err("Sending messages from Rococo is not yet supported.".into())
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs
index 269dead8dc231bb1f44d2380aa887fe674789897..25fd97a90bab999327158b2801e3d261baed2673 100644
--- a/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs
+++ b/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs
@@ -24,7 +24,9 @@ use relay_rococo_client::{Rococo, SyncHeader as RococoSyncHeader};
 use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use relay_wococo_client::{SigningParams as WococoSigningParams, Wococo};
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY;
 
@@ -40,7 +42,10 @@ pub(crate) struct RococoFinalityToWococo {
 impl RococoFinalityToWococo {
 	pub fn new(target_client: Client<Wococo>, target_sign: WococoSigningParams) -> Self {
 		Self {
-			finality_pipeline: FinalityPipelineRococoFinalityToWococo::new(target_client, target_sign),
+			finality_pipeline: FinalityPipelineRococoFinalityToWococo::new(
+				target_client,
+				target_sign,
+			),
 		}
 	}
 }
@@ -48,7 +53,8 @@ impl RococoFinalityToWococo {
 impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo {
 	type FinalitySyncPipeline = FinalityPipelineRococoFinalityToWococo;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
 
 	type TargetChain = Wococo;
 
diff --git a/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs b/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs
index de2d8c7263e5c7aa27dd4c7ff663cf6bc0f27bda..e26adcb9a27b1d2808fda7d05994bcbb12376304 100644
--- a/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs
+++ b/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs
@@ -25,16 +25,22 @@ use bp_messages::MessageNonce;
 use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use frame_support::weights::Weight;
 use messages_relay::message_lane::MessageLane;
-use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams};
+use relay_rococo_client::{
+	HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams,
+};
 use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
-use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo};
-use substrate_relay_helper::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, SubstrateMessageLane,
-	SubstrateMessageLaneToSubstrate,
+use relay_wococo_client::{
+	HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo,
+};
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
 };
-use substrate_relay_helper::messages_source::SubstrateMessagesSource;
-use substrate_relay_helper::messages_target::SubstrateMessagesTarget;
 
 /// Rococo-to-Wococo message lane.
 pub type MessageLaneRococoMessagesToWococo =
@@ -48,23 +54,30 @@ pub struct RococoMessagesToWococo {
 impl SubstrateMessageLane for RococoMessagesToWococo {
 	type MessageLane = MessageLaneRococoMessagesToWococo;
 
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_wococo::TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_rococo::FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
 
 	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_rococo::WITH_WOCOCO_MESSAGES_PALLET_NAME;
 	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_wococo::WITH_ROCOCO_MESSAGES_PALLET_NAME;
 
-	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = bp_wococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_wococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Rococo;
 	type TargetChain = Wococo;
@@ -115,11 +128,7 @@ impl SubstrateMessageLane for RococoMessagesToWococo {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 
 		let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo(
@@ -178,14 +187,14 @@ pub async fn run(
 	// we don't know exact weights of the Wococo runtime. So to guess weights we'll be using
 	// weights from Rialto and then simply dividing it by x2.
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_wococo::max_extrinsic_weight(),
 			bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
-	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (
-		max_messages_in_single_batch / 2,
-		max_messages_weight_in_single_batch / 2,
-	);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
 
 	log::info!(
 		target: "bridge",
@@ -217,8 +226,10 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
diff --git a/bridges/relays/bin-substrate/src/chains/westend.rs b/bridges/relays/bin-substrate/src/chains/westend.rs
index 27621472d6d999e68bc3b6e6499880f70334a3d5..586e6ffd95760be53d8e2db2e23bd7e087a12c35 100644
--- a/bridges/relays/bin-substrate/src/chains/westend.rs
+++ b/bridges/relays/bin-substrate/src/chains/westend.rs
@@ -35,7 +35,9 @@ impl CliChain for Westend {
 		0
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		Err("Sending messages from Westend is not yet supported.".into())
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs
index 4a5e48b51eeab9895c6bc4d2588cf6fdec67ec8e..1ea8fc7dd794eefac1398b9410f6866ce086076b 100644
--- a/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs
+++ b/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs
@@ -24,7 +24,9 @@ use relay_millau_client::{Millau, SigningParams as MillauSigningParams};
 use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend};
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Westend-to-Millau finality sync pipeline.
 pub(crate) type FinalityPipelineWestendFinalityToMillau =
@@ -38,7 +40,10 @@ pub(crate) struct WestendFinalityToMillau {
 impl WestendFinalityToMillau {
 	pub fn new(target_client: Client<Millau>, target_sign: MillauSigningParams) -> Self {
 		Self {
-			finality_pipeline: FinalityPipelineWestendFinalityToMillau::new(target_client, target_sign),
+			finality_pipeline: FinalityPipelineWestendFinalityToMillau::new(
+				target_client,
+				target_sign,
+			),
 		}
 	}
 }
@@ -46,7 +51,8 @@ impl WestendFinalityToMillau {
 impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau {
 	type FinalitySyncPipeline = FinalityPipelineWestendFinalityToMillau;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD;
 
 	type TargetChain = Millau;
 
diff --git a/bridges/relays/bin-substrate/src/chains/wococo.rs b/bridges/relays/bin-substrate/src/chains/wococo.rs
index 9b944d781685db098e2d41cf900990eada89c056..fbdbcf868c7c414486146132f61c4819e92252f9 100644
--- a/bridges/relays/bin-substrate/src/chains/wococo.rs
+++ b/bridges/relays/bin-substrate/src/chains/wococo.rs
@@ -32,41 +32,41 @@ impl CliEncodeCall for Wococo {
 
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
-			Call::Remark { remark_payload, .. } => {
-				relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark(
+			Call::Remark { remark_payload, .. } => relay_wococo_client::runtime::Call::System(
+				relay_wococo_client::runtime::SystemCall::remark(
 					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-				))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::WOCOCO_TO_ROCOCO_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					relay_wococo_client::runtime::Call::BridgeMessagesRococo(
-						relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message(lane.0, payload, fee.0),
-					)
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
 				),
-			},
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::WOCOCO_TO_ROCOCO_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_wococo_client::runtime::Call::BridgeMessagesRococo(
+							relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 			_ => anyhow::bail!("The call is not supported"),
 		})
 	}
 
-	fn get_dispatch_info(call: &relay_wococo_client::runtime::Call) -> anyhow::Result<DispatchInfo> {
+	fn get_dispatch_info(
+		call: &relay_wococo_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
 		match *call {
-			relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark(_)) => {
-				Ok(DispatchInfo {
-					weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT,
-					class: DispatchClass::Normal,
-					pays_fee: Pays::Yes,
-				})
-			}
+			relay_wococo_client::runtime::Call::System(
+				relay_wococo_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
 			_ => anyhow::bail!("Unsupported Rococo call: {:?}", call),
 		}
 	}
@@ -86,7 +86,9 @@ impl CliChain for Wococo {
 		bp_wococo::max_extrinsic_weight()
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String> {
 		Err("Sending messages from Wococo is not yet supported.".into())
 	}
 }
diff --git a/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs
index 74152cdff6d417db342b44c488330755e4860d33..8e11698c1bb663ceb2fcd5c99017f0a250df50dc 100644
--- a/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs
+++ b/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs
@@ -24,7 +24,9 @@ use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams};
 use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use relay_wococo_client::{SyncHeader as WococoSyncHeader, Wococo};
-use substrate_relay_helper::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat
 /// relay as gone wild.
@@ -45,7 +47,10 @@ pub(crate) struct WococoFinalityToRococo {
 impl WococoFinalityToRococo {
 	pub fn new(target_client: Client<Rococo>, target_sign: RococoSigningParams) -> Self {
 		Self {
-			finality_pipeline: FinalityPipelineWococoFinalityToRococo::new(target_client, target_sign),
+			finality_pipeline: FinalityPipelineWococoFinalityToRococo::new(
+				target_client,
+				target_sign,
+			),
 		}
 	}
 }
@@ -53,7 +58,8 @@ impl WococoFinalityToRococo {
 impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo {
 	type FinalitySyncPipeline = FinalityPipelineWococoFinalityToRococo;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
 
 	type TargetChain = Rococo;
 
@@ -113,8 +119,10 @@ mod tests {
 	#[test]
 	fn maximal_balance_decrease_per_day_is_sane() {
 		// we expect Wococo -> Rococo relay to be running in all-headers mode
-		let maximal_balance_decrease =
-			compute_maximal_balance_decrease_per_day::<bp_kusama::Balance, bp_kusama::WeightToFee>(bp_wococo::DAYS);
+		let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::<
+			bp_kusama::Balance,
+			bp_kusama::WeightToFee,
+		>(bp_wococo::DAYS);
 		assert!(
 			MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease,
 			"Maximal expected loss per day {} is larger than hardcoded {}",
diff --git a/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs b/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs
index 035c8f3948365d79fc30a86fa7c9a8991ecdcba3..4d1fc4f0d815bfb11bed03373f73da436a63e492 100644
--- a/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs
+++ b/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs
@@ -25,16 +25,22 @@ use bp_messages::MessageNonce;
 use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use frame_support::weights::Weight;
 use messages_relay::message_lane::MessageLane;
-use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams};
+use relay_rococo_client::{
+	HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams,
+};
 use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
-use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo};
-use substrate_relay_helper::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics, SubstrateMessageLane,
-	SubstrateMessageLaneToSubstrate,
+use relay_wococo_client::{
+	HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo,
+};
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
 };
-use substrate_relay_helper::messages_source::SubstrateMessagesSource;
-use substrate_relay_helper::messages_target::SubstrateMessagesTarget;
 
 /// Wococo-to-Rococo message lane.
 pub type MessageLaneWococoMessagesToRococo =
@@ -47,23 +53,30 @@ pub struct WococoMessagesToRococo {
 
 impl SubstrateMessageLane for WococoMessagesToRococo {
 	type MessageLane = MessageLaneWococoMessagesToRococo;
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_rococo::TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_wococo::FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
 
 	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_wococo::WITH_ROCOCO_MESSAGES_PALLET_NAME;
 	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_rococo::WITH_WOCOCO_MESSAGES_PALLET_NAME;
 
-	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = bp_rococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_rococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Wococo;
 	type TargetChain = Rococo;
@@ -114,11 +127,7 @@ impl SubstrateMessageLane for WococoMessagesToRococo {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 
 		let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo(
@@ -177,14 +186,14 @@ pub async fn run(
 	// we don't know exact weights of the Rococo runtime. So to guess weights we'll be using
 	// weights from Rialto and then simply dividing it by x2.
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_rococo::max_extrinsic_weight(),
 			bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
-	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (
-		max_messages_in_single_batch / 2,
-		max_messages_weight_in_single_batch / 2,
-	);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
 
 	log::info!(
 		target: "bridge",
@@ -216,8 +225,10 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
diff --git a/bridges/relays/bin-substrate/src/cli/derive_account.rs b/bridges/relays/bin-substrate/src/cli/derive_account.rs
index 1903d42c1fcb0835390e5c155001a794d1af1201..5b809eb69f22237ebf0505861e2ffbbcf73eba9b 100644
--- a/bridges/relays/bin-substrate/src/cli/derive_account.rs
+++ b/bridges/relays/bin-substrate/src/cli/derive_account.rs
@@ -14,8 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{bridge::FullBridge, AccountId};
-use crate::select_full_bridge;
+use crate::{
+	cli::{bridge::FullBridge, AccountId},
+	select_full_bridge,
+};
 use relay_substrate_client::Chain;
 use structopt::StructOpt;
 use strum::VariantNames;
@@ -55,11 +57,7 @@ impl DeriveAccount {
 		select_full_bridge!(self.bridge, {
 			let (account, derived_account) = self.derive_account();
 			println!("Source address:\n{} ({})", account, Source::NAME);
-			println!(
-				"->Corresponding (derived) address:\n{} ({})",
-				derived_account,
-				Target::NAME,
-			);
+			println!("->Corresponding (derived) address:\n{} ({})", derived_account, Target::NAME,);
 
 			Ok(())
 		})
diff --git a/bridges/relays/bin-substrate/src/cli/encode_call.rs b/bridges/relays/bin-substrate/src/cli/encode_call.rs
index 23ea4b80738ea422ed5101bf7b24a6ec75514860..5973faab569e0a984b4ab882031e07434648c49f 100644
--- a/bridges/relays/bin-substrate/src/cli/encode_call.rs
+++ b/bridges/relays/bin-substrate/src/cli/encode_call.rs
@@ -14,9 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::{AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId};
-use crate::select_full_bridge;
+use crate::{
+	cli::{
+		bridge::FullBridge, AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId,
+	},
+	select_full_bridge,
+};
 use frame_support::weights::DispatchInfo;
 use relay_substrate_client::Chain;
 use structopt::StructOpt;
@@ -126,31 +129,30 @@ pub(crate) fn preprocess_call<Source: CliEncodeCall + CliChain, Target: CliEncod
 	bridge_instance: u8,
 ) {
 	match *call {
-		Call::Raw { .. } => {}
-		Call::Remark {
-			ref remark_size,
-			ref mut remark_payload,
-		} => {
+		Call::Raw { .. } => {},
+		Call::Remark { ref remark_size, ref mut remark_payload } =>
 			if remark_payload.is_none() {
 				*remark_payload = Some(HexBytes(generate_remark_payload(
 					remark_size,
-					compute_maximal_message_arguments_size(Source::max_extrinsic_size(), Target::max_extrinsic_size()),
+					compute_maximal_message_arguments_size(
+						Source::max_extrinsic_size(),
+						Target::max_extrinsic_size(),
+					),
 				)));
-			}
-		}
+			},
 		Call::Transfer { ref mut recipient, .. } => {
 			recipient.enforce_chain::<Source>();
-		}
-		Call::BridgeSendMessage {
-			ref mut bridge_instance_index,
-			..
-		} => {
+		},
+		Call::BridgeSendMessage { ref mut bridge_instance_index, .. } => {
 			*bridge_instance_index = bridge_instance;
-		}
+		},
 	};
 }
 
-fn generate_remark_payload(remark_size: &Option<ExplicitOrMaximal<usize>>, maximal_allowed_size: u32) -> Vec<u8> {
+fn generate_remark_payload(
+	remark_size: &Option<ExplicitOrMaximal<usize>>,
+	maximal_allowed_size: u32,
+) -> Vec<u8> {
 	match remark_size {
 		Some(ExplicitOrMaximal::Explicit(remark_size)) => vec![0; *remark_size],
 		Some(ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _],
@@ -172,9 +174,11 @@ pub(crate) fn compute_maximal_message_arguments_size(
 ) -> u32 {
 	// assume that both signed extensions and other arguments fit 1KB
 	let service_tx_bytes_on_source_chain = 1024;
-	let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain;
-	let maximal_call_size =
-		bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size);
+	let maximal_source_extrinsic_size =
+		maximal_source_extrinsic_size - service_tx_bytes_on_source_chain;
+	let maximal_call_size = bridge_runtime_common::messages::target::maximal_incoming_message_size(
+		maximal_target_extrinsic_size,
+	);
 	let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size {
 		maximal_source_extrinsic_size
 	} else {
@@ -217,7 +221,8 @@ mod tests {
 	#[test]
 	fn should_encode_remark_with_default_payload() {
 		// given
-		let mut encode_call = EncodeCall::from_iter(vec!["encode-call", "rialto-to-millau", "remark"]);
+		let mut encode_call =
+			EncodeCall::from_iter(vec!["encode-call", "rialto-to-millau", "remark"]);
 
 		// when
 		let hex = encode_call.encode().unwrap();
@@ -247,8 +252,13 @@ mod tests {
 	#[test]
 	fn should_encode_remark_with_size() {
 		// given
-		let mut encode_call =
-			EncodeCall::from_iter(vec!["encode-call", "rialto-to-millau", "remark", "--remark-size", "12"]);
+		let mut encode_call = EncodeCall::from_iter(vec![
+			"encode-call",
+			"rialto-to-millau",
+			"remark",
+			"--remark-size",
+			"12",
+		]);
 
 		// when
 		let hex = encode_call.encode().unwrap();
@@ -275,7 +285,10 @@ mod tests {
 		assert_eq!(err.kind, structopt::clap::ErrorKind::ArgumentConflict);
 
 		let info = err.info.unwrap();
-		assert!(info.contains(&"remark-payload".to_string()) | info.contains(&"remark-size".to_string()))
+		assert!(
+			info.contains(&"remark-payload".to_string()) |
+				info.contains(&"remark-size".to_string())
+		)
 	}
 
 	#[test]
diff --git a/bridges/relays/bin-substrate/src/cli/encode_message.rs b/bridges/relays/bin-substrate/src/cli/encode_message.rs
index 36a4806976ef653c7408c70e7492e22946b86e9e..98e1269aa68e6002ba463503e2b960cdf14c4814 100644
--- a/bridges/relays/bin-substrate/src/cli/encode_message.rs
+++ b/bridges/relays/bin-substrate/src/cli/encode_message.rs
@@ -14,8 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{bridge::FullBridge, AccountId, CliChain, HexBytes};
-use crate::select_full_bridge;
+use crate::{
+	cli::{bridge::FullBridge, AccountId, CliChain, HexBytes},
+	select_full_bridge,
+};
 use structopt::StructOpt;
 use strum::VariantNames;
 
@@ -52,7 +54,8 @@ impl EncodeMessage {
 	/// Run the command.
 	pub fn encode(self) -> anyhow::Result<HexBytes> {
 		select_full_bridge!(self.bridge, {
-			let payload = Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?;
+			let payload =
+				Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?;
 			Ok(HexBytes::encode(&payload))
 		})
 	}
@@ -74,7 +77,8 @@ mod tests {
 	fn should_encode_raw_message() {
 		// given
 		let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000";
-		let encode_message = EncodeMessage::from_iter(vec!["encode-message", "rialto-to-millau", "raw", msg]);
+		let encode_message =
+			EncodeMessage::from_iter(vec!["encode-message", "rialto-to-millau", "raw", msg]);
 
 		// when
 		let hex = encode_message.encode().unwrap();
diff --git a/bridges/relays/bin-substrate/src/cli/estimate_fee.rs b/bridges/relays/bin-substrate/src/cli/estimate_fee.rs
index 7e4cbf5b6b8ff546409a9df50612a39bbe493167..d063ce544cd243099711c9c0b5b048c5b8af90e7 100644
--- a/bridges/relays/bin-substrate/src/cli/estimate_fee.rs
+++ b/bridges/relays/bin-substrate/src/cli/estimate_fee.rs
@@ -14,9 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::{Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams};
-use crate::select_full_bridge;
+use crate::{
+	cli::{bridge::FullBridge, Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams},
+	select_full_bridge,
+};
 use bp_runtime::BalanceOf;
 use codec::{Decode, Encode};
 use relay_substrate_client::Chain;
@@ -42,21 +43,21 @@ pub struct EstimateFee {
 impl EstimateFee {
 	/// Run the command.
 	pub async fn run(self) -> anyhow::Result<()> {
-		let Self {
-			source,
-			bridge,
-			lane,
-			payload,
-		} = self;
+		let Self { source, bridge, lane, payload } = self;
 
 		select_full_bridge!(bridge, {
 			let source_client = source.to_client::<Source>().await?;
 			let lane = lane.into();
-			let payload = Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?;
+			let payload =
+				Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?;
 
-			let fee: BalanceOf<Source> =
-				estimate_message_delivery_and_dispatch_fee(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload)
-					.await?;
+			let fee: BalanceOf<Source> = estimate_message_delivery_and_dispatch_fee(
+				&source_client,
+				ESTIMATE_MESSAGE_FEE_METHOD,
+				lane,
+				payload,
+			)
+			.await?;
 
 			log::info!(target: "bridge", "Fee: {:?}", Balance(fee as _));
 			println!("{}", fee);
@@ -74,10 +75,11 @@ pub(crate) async fn estimate_message_delivery_and_dispatch_fee<Fee: Decode, C: C
 	let encoded_response = client
 		.state_call(estimate_fee_method.into(), (lane, payload).encode().into(), None)
 		.await?;
-	let decoded_response: Option<Fee> =
-		Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?;
-	let fee = decoded_response
-		.ok_or_else(|| anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())))?;
+	let decoded_response: Option<Fee> = Decode::decode(&mut &encoded_response.0[..])
+		.map_err(relay_substrate_client::Error::ResponseParseFailed)?;
+	let fee = decoded_response.ok_or_else(|| {
+		anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec()))
+	})?;
 	Ok(fee)
 }
 
diff --git a/bridges/relays/bin-substrate/src/cli/init_bridge.rs b/bridges/relays/bin-substrate/src/cli/init_bridge.rs
index 3e464e6f54556bcf2c84736baffa148e38e370f6..f1c957f5e217df7380b19898845ed492c378a4d2 100644
--- a/bridges/relays/bin-substrate/src/cli/init_bridge.rs
+++ b/bridges/relays/bin-substrate/src/cli/init_bridge.rs
@@ -67,7 +67,7 @@ macro_rules! select_bridge {
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::RialtoToMillau => {
 				type Source = relay_rialto_client::Rialto;
 				type Target = relay_millau_client::Millau;
@@ -83,7 +83,7 @@ macro_rules! select_bridge {
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::WestendToMillau => {
 				type Source = relay_westend_client::Westend;
 				type Target = relay_millau_client::Millau;
@@ -91,9 +91,10 @@ macro_rules! select_bridge {
 				fn encode_init_bridge(
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
-					// at Westend -> Millau initialization we're not using sudo, because otherwise our deployments
-					// may fail, because we need to initialize both Rialto -> Millau and Westend -> Millau bridge.
-					// => since there's single possible sudo account, one of transaction may fail with duplicate nonce error
+					// at Westend -> Millau initialization we're not using sudo, because otherwise
+					// our deployments may fail, because we need to initialize both Rialto -> Millau
+					// and Westend -> Millau bridge. => since there's single possible sudo account,
+					// one of transaction may fail with duplicate nonce error
 					millau_runtime::BridgeGrandpaWestendCall::<
 						millau_runtime::Runtime,
 						millau_runtime::WestendGrandpaInstance,
@@ -102,7 +103,7 @@ macro_rules! select_bridge {
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::RococoToWococo => {
 				type Source = relay_rococo_client::Rococo;
 				type Target = relay_wococo_client::Wococo;
@@ -111,12 +112,14 @@ macro_rules! select_bridge {
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
 					relay_wococo_client::runtime::Call::BridgeGrandpaRococo(
-						relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize(init_data),
+						relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize(
+							init_data,
+						),
 					)
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::WococoToRococo => {
 				type Source = relay_wococo_client::Wococo;
 				type Target = relay_rococo_client::Rococo;
@@ -125,12 +128,14 @@ macro_rules! select_bridge {
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
 					relay_rococo_client::runtime::Call::BridgeGrandpaWococo(
-						relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize(init_data),
+						relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize(
+							init_data,
+						),
 					)
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::KusamaToPolkadot => {
 				type Source = relay_kusama_client::Kusama;
 				type Target = relay_polkadot_client::Polkadot;
@@ -139,12 +144,14 @@ macro_rules! select_bridge {
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
 					relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa(
-						relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::initialize(init_data),
+						relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::initialize(
+							init_data,
+						),
 					)
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::PolkadotToKusama => {
 				type Source = relay_polkadot_client::Polkadot;
 				type Target = relay_kusama_client::Kusama;
@@ -153,12 +160,14 @@ macro_rules! select_bridge {
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
 					relay_kusama_client::runtime::Call::BridgePolkadotGrandpa(
-						relay_kusama_client::runtime::BridgePolkadotGrandpaCall::initialize(init_data),
+						relay_kusama_client::runtime::BridgePolkadotGrandpaCall::initialize(
+							init_data,
+						),
 					)
 				}
 
 				$generic
-			}
+			},
 		}
 	};
 }
@@ -181,7 +190,10 @@ impl InitBridge {
 							*target_client.genesis_hash(),
 							&target_sign,
 							relay_substrate_client::TransactionEra::immortal(),
-							UnsignedTransaction::new(encode_init_bridge(initialization_data), transaction_nonce),
+							UnsignedTransaction::new(
+								encode_init_bridge(initialization_data),
+								transaction_nonce,
+							),
 						)
 						.encode(),
 					)
diff --git a/bridges/relays/bin-substrate/src/cli/mod.rs b/bridges/relays/bin-substrate/src/cli/mod.rs
index ac072de3056f28ef5742189907df86ffb38f820f..97998a33d35bf9ab88cd7dba45715f7f03039cd8 100644
--- a/bridges/relays/bin-substrate/src/cli/mod.rs
+++ b/bridges/relays/bin-substrate/src/cli/mod.rs
@@ -86,7 +86,8 @@ pub enum Command {
 	EncodeMessage(encode_message::EncodeMessage),
 	/// Estimate Delivery and Dispatch Fee required for message submission to messages pallet.
 	EstimateFee(estimate_fee::EstimateFee),
-	/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain.
+	/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target
+	/// chain.
 	DeriveAccount(derive_account::DeriveAccount),
 	/// Resubmit transactions with increased tip if they are stalled.
 	ResubmitTransactions(resubmit_transactions::ResubmitTransactions),
@@ -100,12 +101,15 @@ impl Command {
 		use relay_utils::initialize::{initialize_logger, initialize_relay};
 
 		match self {
-			Self::RelayHeaders(_) | Self::RelayMessages(_) | Self::RelayHeadersAndMessages(_) | Self::InitBridge(_) => {
+			Self::RelayHeaders(_) |
+			Self::RelayMessages(_) |
+			Self::RelayHeadersAndMessages(_) |
+			Self::InitBridge(_) => {
 				initialize_relay();
-			}
+			},
 			_ => {
 				initialize_logger(false);
-			}
+			},
 		}
 	}
 
@@ -195,10 +199,7 @@ const SS58_FORMAT_PROOF: &str = "u16 -> Ss58Format is infallible; qed";
 impl AccountId {
 	/// Create new SS58-formatted address from raw account id.
 	pub fn from_raw<T: CliChain>(account: sp_runtime::AccountId32) -> Self {
-		Self {
-			account,
-			ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF),
-		}
+		Self { account, ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF) }
 	}
 
 	/// Enforces formatting account to be for given [`CliChain`] type.
@@ -236,7 +237,7 @@ pub trait CliChain: relay_substrate_client::Chain {
 	/// Chain's current version of the runtime.
 	const RUNTIME_VERSION: sp_version::RuntimeVersion;
 
-	/// Crypto keypair type used to send messages.
+	/// Crypto KeyPair type used to send messages.
 	///
 	/// In case of chains supporting multiple cryptos, pick one used by the CLI.
 	type KeyPair: sp_core::crypto::Pair;
@@ -250,7 +251,9 @@ pub trait CliChain: relay_substrate_client::Chain {
 	fn ss58_format() -> u16;
 
 	/// Construct message payload to be sent over the bridge.
-	fn encode_message(message: crate::cli::encode_message::MessagePayload) -> Result<Self::MessagePayload, String>;
+	fn encode_message(
+		message: crate::cli::encode_message::MessagePayload,
+	) -> Result<Self::MessagePayload, String>;
 
 	/// Maximal extrinsic weight (from the runtime).
 	fn max_extrinsic_weight() -> Weight;
@@ -352,7 +355,7 @@ where
 
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
 		if s.to_lowercase() == "max" {
-			return Ok(ExplicitOrMaximal::Maximal);
+			return Ok(ExplicitOrMaximal::Maximal)
 		}
 
 		V::from_str(s)
@@ -531,10 +534,7 @@ mod tests {
 		let expected = vec![rialto1, rialto2, millau1, millau2];
 
 		// when
-		let parsed = expected
-			.iter()
-			.map(|s| AccountId::from_str(s).unwrap())
-			.collect::<Vec<_>>();
+		let parsed = expected.iter().map(|s| AccountId::from_str(s).unwrap()).collect::<Vec<_>>();
 
 		let actual = parsed.iter().map(|a| format!("{}", a)).collect::<Vec<_>>();
 
@@ -563,7 +563,8 @@ mod tests {
 
 		let alice = sp_core::sr25519::Pair::from_string(ALICE, Some(ALICE_PASSWORD)).unwrap();
 		let bob = sp_core::sr25519::Pair::from_string(BOB, Some(BOB_PASSWORD)).unwrap();
-		let bob_with_alice_password = sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap();
+		let bob_with_alice_password =
+			sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap();
 
 		let temp_dir = tempdir::TempDir::new("reads_suri_from_file").unwrap();
 		let mut suri_file_path = temp_dir.path().to_path_buf();
diff --git a/bridges/relays/bin-substrate/src/cli/relay_headers.rs b/bridges/relays/bin-substrate/src/cli/relay_headers.rs
index 48e2d85efbcc381d5e54c8a87b33a573b6d5f837..e90c663bb33a0a8ec2117ef4b3857765088477ce 100644
--- a/bridges/relays/bin-substrate/src/cli/relay_headers.rs
+++ b/bridges/relays/bin-substrate/src/cli/relay_headers.rs
@@ -19,7 +19,9 @@ use strum::{EnumString, EnumVariantNames, VariantNames};
 
 use substrate_relay_helper::finality_pipeline::SubstrateFinalitySyncPipeline;
 
-use crate::cli::{PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams};
+use crate::cli::{
+	PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams,
+};
 
 /// Start headers relayer process.
 #[derive(StructOpt)]
@@ -27,7 +29,8 @@ pub struct RelayHeaders {
 	/// A bridge instance to relay headers for.
 	#[structopt(possible_values = RelayHeadersBridge::VARIANTS, case_insensitive = true)]
 	bridge: RelayHeadersBridge,
-	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) are relayed.
+	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set)
+	/// are relayed.
 	#[structopt(long)]
 	only_mandatory_headers: bool,
 	#[structopt(flatten)]
@@ -62,49 +65,49 @@ macro_rules! select_bridge {
 				type Finality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::RialtoToMillau => {
 				type Source = relay_rialto_client::Rialto;
 				type Target = relay_millau_client::Millau;
 				type Finality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::WestendToMillau => {
 				type Source = relay_westend_client::Westend;
 				type Target = relay_millau_client::Millau;
 				type Finality = crate::chains::westend_headers_to_millau::WestendFinalityToMillau;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::RococoToWococo => {
 				type Source = relay_rococo_client::Rococo;
 				type Target = relay_wococo_client::Wococo;
 				type Finality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::WococoToRococo => {
 				type Source = relay_wococo_client::Wococo;
 				type Target = relay_rococo_client::Rococo;
 				type Finality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::KusamaToPolkadot => {
 				type Source = relay_kusama_client::Kusama;
 				type Target = relay_polkadot_client::Polkadot;
 				type Finality = crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::PolkadotToKusama => {
 				type Source = relay_polkadot_client::Polkadot;
 				type Target = relay_kusama_client::Kusama;
 				type Finality = crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama;
 
 				$generic
-			}
+			},
 		}
 	};
 }
diff --git a/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs
index c6a0983141131453cec3cdd53d74027036148421..303acf3f125d632106483a651cdca74d1a07d056 100644
--- a/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs
+++ b/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs
@@ -27,14 +27,20 @@ use structopt::StructOpt;
 use strum::VariantNames;
 
 use codec::Encode;
-use relay_substrate_client::{AccountIdOf, Chain, Client, TransactionSignScheme, UnsignedTransaction};
+use relay_substrate_client::{
+	AccountIdOf, Chain, Client, TransactionSignScheme, UnsignedTransaction,
+};
 use relay_utils::metrics::MetricsParams;
 use sp_core::{Bytes, Pair};
-use substrate_relay_helper::messages_lane::{MessagesRelayParams, SubstrateMessageLane};
-use substrate_relay_helper::on_demand_headers::OnDemandHeadersRelay;
+use substrate_relay_helper::{
+	messages_lane::{MessagesRelayParams, SubstrateMessageLane},
+	on_demand_headers::OnDemandHeadersRelay,
+};
 
-use crate::cli::{relay_messages::RelayerMode, CliChain, HexLaneId, PrometheusParams};
-use crate::declare_chain_options;
+use crate::{
+	cli::{relay_messages::RelayerMode, CliChain, HexLaneId, PrometheusParams},
+	declare_chain_options,
+};
 
 /// Maximal allowed conversion rate error ratio (abs(real - stored) / stored) that we allow.
 ///
@@ -63,16 +69,17 @@ pub struct HeadersAndMessagesSharedParams {
 	/// Create relayers fund accounts on both chains, if it does not exists yet.
 	#[structopt(long)]
 	create_relayers_fund_accounts: bool,
-	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) are relayed.
+	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set)
+	/// are relayed.
 	#[structopt(long)]
 	only_mandatory_headers: bool,
 	#[structopt(flatten)]
 	prometheus_params: PrometheusParams,
 }
 
-// The reason behind this macro is that 'normal' relays are using source and target chains terminology,
-// which is unusable for both-way relays (if you're relaying headers from Rialto to Millau and from
-// Millau to Rialto, then which chain is source?).
+// The reason behind this macro is that 'normal' relays are using source and target chains
+// terminology, which is unusable for both-way relays (if you're relaying headers from Rialto to
+// Millau and from Millau to Rialto, then which chain is source?).
 macro_rules! declare_bridge_options {
 	($chain1:ident, $chain2:ident) => {
 		paste::item! {
@@ -116,25 +123,35 @@ macro_rules! select_bridge {
 				type Left = relay_millau_client::Millau;
 				type Right = relay_rialto_client::Rialto;
 
-				type LeftToRightFinality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto;
-				type RightToLeftFinality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau;
+				type LeftToRightFinality =
+					crate::chains::millau_headers_to_rialto::MillauFinalityToRialto;
+				type RightToLeftFinality =
+					crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau;
 
-				type LeftToRightMessages = crate::chains::millau_messages_to_rialto::MillauMessagesToRialto;
-				type RightToLeftMessages = crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau;
+				type LeftToRightMessages =
+					crate::chains::millau_messages_to_rialto::MillauMessagesToRialto;
+				type RightToLeftMessages =
+					crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau;
 
 				type LeftAccountIdConverter = bp_millau::AccountIdConverter;
 				type RightAccountIdConverter = bp_rialto::AccountIdConverter;
 
-				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_millau::BlockNumber = bp_millau::SESSION_LENGTH;
-				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_rialto::BlockNumber = bp_rialto::SESSION_LENGTH;
+				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_millau::BlockNumber =
+					bp_millau::SESSION_LENGTH;
+				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_rialto::BlockNumber =
+					bp_rialto::SESSION_LENGTH;
 
-				use crate::chains::millau_messages_to_rialto::{
-					add_standalone_metrics as add_left_to_right_standalone_metrics, run as left_to_right_messages,
-					update_rialto_to_millau_conversion_rate as update_right_to_left_conversion_rate,
-				};
-				use crate::chains::rialto_messages_to_millau::{
-					add_standalone_metrics as add_right_to_left_standalone_metrics, run as right_to_left_messages,
-					update_millau_to_rialto_conversion_rate as update_left_to_right_conversion_rate,
+				use crate::chains::{
+					millau_messages_to_rialto::{
+						add_standalone_metrics as add_left_to_right_standalone_metrics,
+						run as left_to_right_messages,
+						update_rialto_to_millau_conversion_rate as update_right_to_left_conversion_rate,
+					},
+					rialto_messages_to_millau::{
+						add_standalone_metrics as add_right_to_left_standalone_metrics,
+						run as right_to_left_messages,
+						update_millau_to_rialto_conversion_rate as update_left_to_right_conversion_rate,
+					},
 				};
 
 				async fn left_create_account(
@@ -154,30 +171,40 @@ macro_rules! select_bridge {
 				}
 
 				$generic
-			}
+			},
 			RelayHeadersAndMessages::RococoWococo(_) => {
 				type Params = RococoWococoHeadersAndMessages;
 
 				type Left = relay_rococo_client::Rococo;
 				type Right = relay_wococo_client::Wococo;
 
-				type LeftToRightFinality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo;
-				type RightToLeftFinality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo;
+				type LeftToRightFinality =
+					crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo;
+				type RightToLeftFinality =
+					crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo;
 
-				type LeftToRightMessages = crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo;
-				type RightToLeftMessages = crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo;
+				type LeftToRightMessages =
+					crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo;
+				type RightToLeftMessages =
+					crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo;
 
 				type LeftAccountIdConverter = bp_rococo::AccountIdConverter;
 				type RightAccountIdConverter = bp_wococo::AccountIdConverter;
 
-				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber = bp_rococo::SESSION_LENGTH;
-				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber = bp_wococo::SESSION_LENGTH;
+				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber =
+					bp_rococo::SESSION_LENGTH;
+				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber =
+					bp_wococo::SESSION_LENGTH;
 
-				use crate::chains::rococo_messages_to_wococo::{
-					add_standalone_metrics as add_left_to_right_standalone_metrics, run as left_to_right_messages,
-				};
-				use crate::chains::wococo_messages_to_rococo::{
-					add_standalone_metrics as add_right_to_left_standalone_metrics, run as right_to_left_messages,
+				use crate::chains::{
+					rococo_messages_to_wococo::{
+						add_standalone_metrics as add_left_to_right_standalone_metrics,
+						run as left_to_right_messages,
+					},
+					wococo_messages_to_rococo::{
+						add_standalone_metrics as add_right_to_left_standalone_metrics,
+						run as right_to_left_messages,
+					},
 				};
 
 				async fn update_right_to_left_conversion_rate(
@@ -213,32 +240,42 @@ macro_rules! select_bridge {
 				}
 
 				$generic
-			}
+			},
 			RelayHeadersAndMessages::KusamaPolkadot(_) => {
 				type Params = KusamaPolkadotHeadersAndMessages;
 
 				type Left = relay_kusama_client::Kusama;
 				type Right = relay_polkadot_client::Polkadot;
 
-				type LeftToRightFinality = crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot;
-				type RightToLeftFinality = crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama;
+				type LeftToRightFinality =
+					crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot;
+				type RightToLeftFinality =
+					crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama;
 
-				type LeftToRightMessages = crate::chains::kusama_messages_to_polkadot::KusamaMessagesToPolkadot;
-				type RightToLeftMessages = crate::chains::polkadot_messages_to_kusama::PolkadotMessagesToKusama;
+				type LeftToRightMessages =
+					crate::chains::kusama_messages_to_polkadot::KusamaMessagesToPolkadot;
+				type RightToLeftMessages =
+					crate::chains::polkadot_messages_to_kusama::PolkadotMessagesToKusama;
 
 				type LeftAccountIdConverter = bp_kusama::AccountIdConverter;
 				type RightAccountIdConverter = bp_polkadot::AccountIdConverter;
 
-				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_kusama::BlockNumber = bp_kusama::SESSION_LENGTH;
-				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_polkadot::BlockNumber = bp_polkadot::SESSION_LENGTH;
+				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_kusama::BlockNumber =
+					bp_kusama::SESSION_LENGTH;
+				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_polkadot::BlockNumber =
+					bp_polkadot::SESSION_LENGTH;
 
-				use crate::chains::kusama_messages_to_polkadot::{
-					add_standalone_metrics as add_left_to_right_standalone_metrics, run as left_to_right_messages,
-					update_polkadot_to_kusama_conversion_rate as update_right_to_left_conversion_rate,
-				};
-				use crate::chains::polkadot_messages_to_kusama::{
-					add_standalone_metrics as add_right_to_left_standalone_metrics, run as right_to_left_messages,
-					update_kusama_to_polkadot_conversion_rate as update_left_to_right_conversion_rate,
+				use crate::chains::{
+					kusama_messages_to_polkadot::{
+						add_standalone_metrics as add_left_to_right_standalone_metrics,
+						run as left_to_right_messages,
+						update_polkadot_to_kusama_conversion_rate as update_right_to_left_conversion_rate,
+					},
+					polkadot_messages_to_kusama::{
+						add_standalone_metrics as add_right_to_left_standalone_metrics,
+						run as right_to_left_messages,
+						update_kusama_to_polkadot_conversion_rate as update_left_to_right_conversion_rate,
+					},
 				};
 
 				async fn left_create_account(
@@ -248,25 +285,24 @@ macro_rules! select_bridge {
 				) -> anyhow::Result<()> {
 					let left_genesis_hash = *left_client.genesis_hash();
 					left_client
-						.submit_signed_extrinsic(left_sign.public().into(), move |_, transaction_nonce| {
-							Bytes(
-								Left::sign_transaction(
-									left_genesis_hash,
-									&left_sign,
-									relay_substrate_client::TransactionEra::immortal(),
-									UnsignedTransaction::new(
-										relay_kusama_client::runtime::Call::Balances(
-											relay_kusama_client::runtime::BalancesCall::transfer(
-												bp_kusama::AccountAddress::Id(account_id),
-												bp_kusama::EXISTENTIAL_DEPOSIT.into(),
+						.submit_signed_extrinsic(
+							left_sign.public().into(),
+							move |_, transaction_nonce| {
+								Bytes(
+									Left::sign_transaction(left_genesis_hash, &left_sign, relay_substrate_client::TransactionEra::immortal(),
+										UnsignedTransaction::new(
+											relay_kusama_client::runtime::Call::Balances(
+												relay_kusama_client::runtime::BalancesCall::transfer(
+													bp_kusama::AccountAddress::Id(account_id),
+													bp_kusama::EXISTENTIAL_DEPOSIT.into(),
+												),
 											),
+											transaction_nonce,
 										),
-										transaction_nonce,
-									),
+									).encode()
 								)
-								.encode(),
-							)
-						})
+							},
+						)
 						.await
 						.map(drop)
 						.map_err(|e| anyhow::format_err!("{}", e))
@@ -279,32 +315,31 @@ macro_rules! select_bridge {
 				) -> anyhow::Result<()> {
 					let right_genesis_hash = *right_client.genesis_hash();
 					right_client
-						.submit_signed_extrinsic(right_sign.public().into(), move |_, transaction_nonce| {
-							Bytes(
-								Right::sign_transaction(
-									right_genesis_hash,
-									&right_sign,
-									relay_substrate_client::TransactionEra::immortal(),
-									UnsignedTransaction::new(
-										relay_polkadot_client::runtime::Call::Balances(
-											relay_polkadot_client::runtime::BalancesCall::transfer(
-												bp_polkadot::AccountAddress::Id(account_id),
-												bp_polkadot::EXISTENTIAL_DEPOSIT.into(),
+						.submit_signed_extrinsic(
+							right_sign.public().into(),
+							move |_, transaction_nonce| {
+								Bytes(
+									Right::sign_transaction(right_genesis_hash, &right_sign, relay_substrate_client::TransactionEra::immortal(),
+										UnsignedTransaction::new(
+											relay_polkadot_client::runtime::Call::Balances(
+												relay_polkadot_client::runtime::BalancesCall::transfer(
+													bp_polkadot::AccountAddress::Id(account_id),
+													bp_polkadot::EXISTENTIAL_DEPOSIT.into(),
+												),
 											),
+											transaction_nonce,
 										),
-										transaction_nonce,
-									),
+									).encode()
 								)
-								.encode(),
-							)
-						})
+							},
+						)
 						.await
 						.map(drop)
 						.map_err(|e| anyhow::format_err!("{}", e))
 				}
 
 				$generic
-			}
+			},
 		}
 	};
 }
@@ -330,16 +365,19 @@ impl RelayHeadersAndMessages {
 			let left_client = params.left.to_client::<Left>().await?;
 			let left_transactions_mortality = params.left_sign.transactions_mortality()?;
 			let left_sign = params.left_sign.to_keypair::<Left>()?;
-			let left_messages_pallet_owner = params.left_messages_pallet_owner.to_keypair::<Left>()?;
+			let left_messages_pallet_owner =
+				params.left_messages_pallet_owner.to_keypair::<Left>()?;
 			let right_client = params.right.to_client::<Right>().await?;
 			let right_transactions_mortality = params.right_sign.transactions_mortality()?;
 			let right_sign = params.right_sign.to_keypair::<Right>()?;
-			let right_messages_pallet_owner = params.right_messages_pallet_owner.to_keypair::<Right>()?;
+			let right_messages_pallet_owner =
+				params.right_messages_pallet_owner.to_keypair::<Right>()?;
 
 			let lanes = params.shared.lane;
 			let relayer_mode = params.shared.relayer_mode.into();
 
-			const METRIC_IS_SOME_PROOF: &str = "it is `None` when metric has been already registered; \
+			const METRIC_IS_SOME_PROOF: &str =
+				"it is `None` when metric has been already registered; \
 				this is the command entrypoint, so nothing has been registered yet; \
 				qed";
 
@@ -413,22 +451,40 @@ impl RelayHeadersAndMessages {
 			}
 
 			if params.shared.create_relayers_fund_accounts {
-				let relayer_fund_acount_id =
-					pallet_bridge_messages::relayer_fund_account_id::<AccountIdOf<Left>, LeftAccountIdConverter>();
+				let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::<
+					AccountIdOf<Left>,
+					LeftAccountIdConverter,
+				>();
 				let relayers_fund_account_balance =
 					left_client.free_native_balance(relayer_fund_acount_id.clone()).await;
-				if let Err(relay_substrate_client::Error::AccountDoesNotExist) = relayers_fund_account_balance {
+				if let Err(relay_substrate_client::Error::AccountDoesNotExist) =
+					relayers_fund_account_balance
+				{
 					log::info!(target: "bridge", "Going to create relayers fund account at {}.", Left::NAME);
-					left_create_account(left_client.clone(), left_sign.clone(), relayer_fund_acount_id).await?;
+					left_create_account(
+						left_client.clone(),
+						left_sign.clone(),
+						relayer_fund_acount_id,
+					)
+					.await?;
 				}
 
-				let relayer_fund_acount_id =
-					pallet_bridge_messages::relayer_fund_account_id::<AccountIdOf<Right>, RightAccountIdConverter>();
+				let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::<
+					AccountIdOf<Right>,
+					RightAccountIdConverter,
+				>();
 				let relayers_fund_account_balance =
 					right_client.free_native_balance(relayer_fund_acount_id.clone()).await;
-				if let Err(relay_substrate_client::Error::AccountDoesNotExist) = relayers_fund_account_balance {
+				if let Err(relay_substrate_client::Error::AccountDoesNotExist) =
+					relayers_fund_account_balance
+				{
 					log::info!(target: "bridge", "Going to create relayers fund account at {}.", Right::NAME);
-					right_create_account(right_client.clone(), right_sign.clone(), relayer_fund_acount_id).await?;
+					right_create_account(
+						right_client.clone(),
+						right_sign.clone(),
+						relayer_fund_acount_id,
+					)
+					.await?;
 				}
 			}
 
diff --git a/bridges/relays/bin-substrate/src/cli/relay_messages.rs b/bridges/relays/bin-substrate/src/cli/relay_messages.rs
index 0f89e9843f1613a20545c0689e326c92524fe3ee..3ccf53348d611dd4c289338061c9730ca7fbe7ac 100644
--- a/bridges/relays/bin-substrate/src/cli/relay_messages.rs
+++ b/bridges/relays/bin-substrate/src/cli/relay_messages.rs
@@ -19,12 +19,13 @@ use strum::{EnumString, EnumVariantNames, VariantNames};
 
 use substrate_relay_helper::messages_lane::MessagesRelayParams;
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::{
-	HexLaneId, PrometheusParams, SourceConnectionParams, SourceSigningParams, TargetConnectionParams,
-	TargetSigningParams,
+use crate::{
+	cli::{
+		bridge::FullBridge, HexLaneId, PrometheusParams, SourceConnectionParams,
+		SourceSigningParams, TargetConnectionParams, TargetSigningParams,
+	},
+	select_full_bridge,
 };
-use crate::select_full_bridge;
 
 /// Relayer operating mode.
 #[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq)]
@@ -32,7 +33,8 @@ use crate::select_full_bridge;
 pub enum RelayerMode {
 	/// The relayer doesn't care about rewards.
 	Altruistic,
-	/// The relayer will deliver all messages and confirmations as long as he's not losing any funds.
+	/// The relayer will deliver all messages and confirmations as long as he's not losing any
+	/// funds.
 	Rational,
 }
 
diff --git a/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs b/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs
index 4fad160fee08cc3e26306fe6571fef1932a038ee..03a9c114b0b12dd06ce9da37491d2b33f1440832 100644
--- a/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs
+++ b/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs
@@ -18,7 +18,9 @@ use crate::cli::{TargetConnectionParams, TargetSigningParams};
 
 use codec::{Decode, Encode};
 use num_traits::{One, Zero};
-use relay_substrate_client::{BlockWithJustification, Chain, Client, Error as SubstrateError, TransactionSignScheme};
+use relay_substrate_client::{
+	BlockWithJustification, Chain, Client, Error as SubstrateError, TransactionSignScheme,
+};
 use relay_utils::FailedClient;
 use sp_core::Bytes;
 use sp_runtime::{
@@ -54,13 +56,15 @@ macro_rules! select_bridge {
 				type Target = relay_millau_client::Millau;
 				type TargetSign = relay_millau_client::Millau;
 
-				// When large message is being sent from Millau to Rialto AND other transactions are blocking
-				// it from being mined, we'll see something like this in logs:
+				// When large message is being sent from Millau to Rialto AND other transactions are
+				// blocking it from being mined, we'll see something like this in logs:
 				//
-				// Millau transaction priority with tip=0: 17800827994. Target priority: 526186677695
+				// Millau transaction priority with tip=0: 17800827994. Target priority:
+				// 526186677695
 				//
-				// So since fee multiplier in Millau is `1` and `WeightToFee` is `IdentityFee`, then we need
-				// tip around `526186677695 - 17800827994 = 508_385_849_701`. Let's round it up to `1_000_000_000_000`.
+				// So since fee multiplier in Millau is `1` and `WeightToFee` is `IdentityFee`, then
+				// we need tip around `526186677695 - 17800827994 = 508_385_849_701`. Let's round it
+				// up to `1_000_000_000_000`.
 
 				const TIP_STEP: bp_millau::Balance = 1_000_000_000;
 				const TIP_LIMIT: bp_millau::Balance = 1_000_000_000_000;
@@ -68,7 +72,7 @@ macro_rules! select_bridge {
 				const STALLED_BLOCKS: bp_millau::BlockNumber = 5;
 
 				$generic
-			}
+			},
 		}
 	};
 }
@@ -158,8 +162,8 @@ async fn run_until_connection_lost<C: Chain, S: TransactionSignScheme<Chain = C>
 					C::NAME,
 					error,
 				);
-				return Err(FailedClient::Target);
-			}
+				return Err(FailedClient::Target)
+			},
 		};
 	}
 }
@@ -174,8 +178,8 @@ async fn run_loop_iteration<C: Chain, S: TransactionSignScheme<Chain = C>>(
 		Some(original_transaction) => original_transaction,
 		None => {
 			log::trace!(target: "bridge", "No {} transactions from required signer in the txpool", C::NAME);
-			return Ok(context);
-		}
+			return Ok(context)
+		},
 	};
 	let original_transaction_hash = C::Hasher::hash(&original_transaction.encode());
 	let context = context.notice_transaction(original_transaction_hash);
@@ -189,15 +193,15 @@ async fn run_loop_iteration<C: Chain, S: TransactionSignScheme<Chain = C>>(
 			context.stalled_for,
 			context.stalled_for_limit,
 		);
-		return Ok(context);
+		return Ok(context)
 	}
 
 	let (best_block, target_priority) = match read_previous_best_priority::<C, S>(&client).await? {
 		Some((best_block, target_priority)) => (best_block, target_priority),
 		None => {
 			log::trace!(target: "bridge", "Failed to read priority of best {} transaction in its best block", C::NAME);
-			return Ok(context);
-		}
+			return Ok(context)
+		},
 	};
 
 	let (is_updated, updated_transaction) = select_transaction_tip::<C, S>(
@@ -213,7 +217,7 @@ async fn run_loop_iteration<C: Chain, S: TransactionSignScheme<Chain = C>>(
 
 	if !is_updated {
 		log::trace!(target: "bridge", "{} transaction tip can not be updated. Reached limit?", C::NAME);
-		return Ok(context);
+		return Ok(context)
 	}
 
 	let updated_transaction = updated_transaction.encode();
@@ -241,10 +245,10 @@ async fn lookup_signer_transaction<C: Chain, S: TransactionSignScheme<Chain = C>
 		let pending_transaction = S::SignedTransaction::decode(&mut &pending_transaction.0[..])
 			.map_err(SubstrateError::ResponseParseFailed)?;
 		if !S::is_signed_by(key_pair, &pending_transaction) {
-			continue;
+			continue
 		}
 
-		return Ok(Some(pending_transaction));
+		return Ok(Some(pending_transaction))
 	}
 
 	Ok(None)
@@ -286,14 +290,15 @@ async fn select_transaction_tip<C: Chain, S: TransactionSignScheme<Chain = C>>(
 ) -> Result<(bool, S::SignedTransaction), SubstrateError> {
 	let stx = format!("{:?}", tx);
 	let mut current_priority = client.validate_transaction(at_block, tx.clone()).await??.priority;
-	let mut unsigned_tx = S::parse_transaction(tx)
-		.ok_or_else(|| SubstrateError::Custom(format!("Failed to parse {} transaction {}", C::NAME, stx,)))?;
+	let mut unsigned_tx = S::parse_transaction(tx).ok_or_else(|| {
+		SubstrateError::Custom(format!("Failed to parse {} transaction {}", C::NAME, stx,))
+	})?;
 	let old_tip = unsigned_tx.tip;
 
 	while current_priority < target_priority {
 		let next_tip = unsigned_tx.tip + tip_step;
 		if next_tip > tip_limit {
-			break;
+			break
 		}
 
 		log::trace!(
diff --git a/bridges/relays/bin-substrate/src/cli/send_message.rs b/bridges/relays/bin-substrate/src/cli/send_message.rs
index 04ce386ef09a9f7809770b3de814a581b9a16dcc..3e77ad8342927bdfc5a16f85b72098d22ec246a4 100644
--- a/bridges/relays/bin-substrate/src/cli/send_message.rs
+++ b/bridges/relays/bin-substrate/src/cli/send_message.rs
@@ -14,12 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::encode_call::{self, CliEncodeCall};
-use crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee;
 use crate::cli::{
-	Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, SourceSigningParams,
-	TargetSigningParams,
+	bridge::FullBridge,
+	encode_call::{self, CliEncodeCall},
+	estimate_fee::estimate_message_delivery_and_dispatch_fee,
+	Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams,
+	SourceSigningParams, TargetSigningParams,
 };
 use bp_message_dispatch::{CallOrigin, MessagePayload};
 use bp_runtime::BalanceOf;
@@ -77,7 +77,8 @@ pub struct SendMessage {
 	/// Dispatch weight of the message. If not passed, determined automatically.
 	#[structopt(long)]
 	dispatch_weight: Option<ExplicitOrMaximal<Weight>>,
-	/// Delivery and dispatch fee in source chain base currency units. If not passed, determined automatically.
+	/// Delivery and dispatch fee in source chain base currency units. If not passed, determined
+	/// automatically.
 	#[structopt(long)]
 	fee: Option<Balance>,
 	/// Message type.
@@ -138,7 +139,7 @@ impl SendMessage {
 								target_origin_public.into(),
 								digest_signature.into(),
 							)
-						}
+						},
 					},
 					&target_call,
 					*dispatch_fee_payment,
@@ -238,10 +239,7 @@ fn prepare_call_dispatch_weight(
 	weight_from_pre_dispatch_call: ExplicitOrMaximal<Weight>,
 	maximal_allowed_weight: Weight,
 ) -> Weight {
-	match user_specified_dispatch_weight
-		.clone()
-		.unwrap_or(weight_from_pre_dispatch_call)
-	{
+	match user_specified_dispatch_weight.clone().unwrap_or(weight_from_pre_dispatch_call) {
 		ExplicitOrMaximal::Explicit(weight) => weight,
 		ExplicitOrMaximal::Maximal => maximal_allowed_weight,
 	}
@@ -272,24 +270,14 @@ where
 	log::info!(target: "bridge", "Encoded Message Payload: {:?}", HexBytes::encode(&payload));
 
 	// re-pack to return `Vec<u8>`
-	let MessagePayload {
-		spec_version,
-		weight,
-		origin,
-		dispatch_fee_payment,
-		call,
-	} = payload;
-	MessagePayload {
-		spec_version,
-		weight,
-		origin,
-		dispatch_fee_payment,
-		call: call.0,
-	}
+	let MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call } = payload;
+	MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call: call.0 }
 }
 
 pub(crate) fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight {
-	bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight)
+	bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(
+		maximal_extrinsic_weight,
+	)
 }
 
 #[cfg(test)]
@@ -321,7 +309,9 @@ mod tests {
 			MessagePayload {
 				spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version,
 				weight: 576000,
-				origin: CallOrigin::SourceAccount(sp_keyring::AccountKeyring::Alice.to_account_id()),
+				origin: CallOrigin::SourceAccount(
+					sp_keyring::AccountKeyring::Alice.to_account_id()
+				),
 				dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain,
 				call: hex!("0001081234").to_vec(),
 			}
diff --git a/bridges/relays/bin-substrate/src/cli/swap_tokens.rs b/bridges/relays/bin-substrate/src/cli/swap_tokens.rs
index 264b98e948ff94ce047d5ec2f2007eded4bb61e0..d18d37efc3d20306ec1467630092a6cef58be2fb 100644
--- a/bridges/relays/bin-substrate/src/cli/swap_tokens.rs
+++ b/bridges/relays/bin-substrate/src/cli/swap_tokens.rs
@@ -28,15 +28,16 @@ use strum::{EnumString, EnumVariantNames, VariantNames};
 
 use frame_support::dispatch::GetDispatchInfo;
 use relay_substrate_client::{
-	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, CallOf, Chain, ChainWithBalances, Client,
-	Error as SubstrateError, HashOf, SignatureOf, Subscription, TransactionSignScheme, TransactionStatusOf,
-	UnsignedTransaction,
+	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, CallOf, Chain, ChainWithBalances,
+	Client, Error as SubstrateError, HashOf, SignatureOf, Subscription, TransactionSignScheme,
+	TransactionStatusOf, UnsignedTransaction,
 };
 use sp_core::{blake2_256, storage::StorageKey, Bytes, Pair, H256, U256};
 use sp_runtime::traits::{Convert, Header as HeaderT};
 
 use crate::cli::{
-	Balance, CliChain, SourceConnectionParams, SourceSigningParams, TargetConnectionParams, TargetSigningParams,
+	Balance, CliChain, SourceConnectionParams, SourceSigningParams, TargetConnectionParams,
+	TargetSigningParams,
 };
 
 /// Swap tokens.
@@ -71,7 +72,8 @@ pub struct SwapTokens {
 pub enum TokenSwapType {
 	/// The `target_sign` is temporary and only have funds for single swap.
 	NoLock,
-	/// This swap type prevents `source_signer` from restarting the swap after it has been completed.
+	/// This swap type prevents `source_signer` from restarting the swap after it has been
+	/// completed.
 	LockUntilBlock {
 		/// Number of blocks before the swap expires.
 		#[structopt(long)]
@@ -119,7 +121,7 @@ macro_rules! select_bridge {
 				const TARGET_TO_SOURCE_LANE_ID: bp_messages::LaneId = [0, 0, 0, 0];
 
 				$generic
-			}
+			},
 		}
 	};
 }
@@ -133,7 +135,8 @@ impl SwapTokens {
 			let target_client = self.target.to_client::<Target>().await?;
 			let target_sign = self.target_sign.to_keypair::<Target>()?;
 
-			// names of variables in this function are matching names used by the `pallet-bridge-token-swap`
+			// names of variables in this function are matching names used by the
+			// `pallet-bridge-token-swap`
 
 			// prepare token swap intention
 			let token_swap = self
@@ -143,18 +146,25 @@ impl SwapTokens {
 			// group all accounts that will be used later
 			let accounts = TokenSwapAccounts {
 				source_account_at_bridged_chain: derive_target_account_from_source_account(
-					bp_runtime::SourceAccount::Account(token_swap.source_account_at_this_chain.clone()),
+					bp_runtime::SourceAccount::Account(
+						token_swap.source_account_at_this_chain.clone(),
+					),
 				),
 				target_account_at_this_chain: derive_source_account_from_target_account(
-					bp_runtime::SourceAccount::Account(token_swap.target_account_at_bridged_chain.clone()),
+					bp_runtime::SourceAccount::Account(
+						token_swap.target_account_at_bridged_chain.clone(),
+					),
 				),
 				source_account_at_this_chain: token_swap.source_account_at_this_chain.clone(),
 				target_account_at_bridged_chain: token_swap.target_account_at_bridged_chain.clone(),
-				swap_account: FromSwapToThisAccountIdConverter::convert(token_swap.using_encoded(blake2_256).into()),
+				swap_account: FromSwapToThisAccountIdConverter::convert(
+					token_swap.using_encoded(blake2_256).into(),
+				),
 			};
 
 			// account balances are used to demonstrate what's happening :)
-			let initial_balances = read_account_balances(&accounts, &source_client, &target_client).await?;
+			let initial_balances =
+				read_account_balances(&accounts, &source_client, &target_client).await?;
 
 			// before calling something that may fail, log what we're trying to do
 			log::info!(target: "bridge", "Starting swap: {:?}", token_swap);
@@ -171,7 +181,8 @@ impl SwapTokens {
 				token_swap.target_balance_at_bridged_chain,
 			)
 			.into();
-			let bridged_currency_transfer_weight = bridged_currency_transfer.get_dispatch_info().weight;
+			let bridged_currency_transfer_weight =
+				bridged_currency_transfer.get_dispatch_info().weight;
 
 			// sign message
 			let bridged_chain_spec_version = TARGET_SPEC_VERSION;
@@ -182,10 +193,12 @@ impl SwapTokens {
 				SOURCE_CHAIN_ID,
 				TARGET_CHAIN_ID,
 			);
-			let bridged_currency_transfer_signature: SignatureOf<Target> = target_sign.sign(&signature_payload).into();
+			let bridged_currency_transfer_signature: SignatureOf<Target> =
+				target_sign.sign(&signature_payload).into();
 
 			// prepare `create_swap` call
-			let target_public_at_bridged_chain: AccountPublicOf<Target> = target_sign.public().into();
+			let target_public_at_bridged_chain: AccountPublicOf<Target> =
+				target_sign.public().into();
 			let swap_delivery_and_dispatch_fee: BalanceOf<Source> =
 				crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee(
 					&source_client,
@@ -199,7 +212,8 @@ impl SwapTokens {
 							target_public_at_bridged_chain.clone(),
 							bridged_currency_transfer_signature.clone(),
 						),
-						dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtTargetChain,
+						dispatch_fee_payment:
+							bp_runtime::messages::DispatchFeePayment::AtTargetChain,
 						call: bridged_currency_transfer.encode(),
 					},
 				)
@@ -245,19 +259,20 @@ impl SwapTokens {
 				pallet_bridge_token_swap::PENDING_SWAPS_MAP_NAME,
 				token_swap_hash.as_ref(),
 			);
-			match read_token_swap_state(&source_client, swap_created_at, &token_swap_storage_key).await? {
+			match read_token_swap_state(&source_client, swap_created_at, &token_swap_storage_key)
+				.await?
+			{
 				Some(bp_token_swap::TokenSwapState::Started) => {
 					log::info!(target: "bridge", "Swap has been successfully started");
 					let intermediate_balances =
 						read_account_balances(&accounts, &source_client, &target_client).await?;
 					log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances);
-				}
-				Some(token_swap_state) => {
+				},
+				Some(token_swap_state) =>
 					return Err(anyhow::format_err!(
 						"Fresh token swap has unexpected state: {:?}",
 						token_swap_state,
-					))
-				}
+					)),
 				None => return Err(anyhow::format_err!("Failed to start token swap")),
 			};
 
@@ -265,7 +280,8 @@ impl SwapTokens {
 			// Step 2: message is being relayed to the target chain and dispathed there
 			//
 
-			// wait until message is dispatched at the target chain and dispatch result delivered back to source chain
+			// wait until message is dispatched at the target chain and dispatch result delivered
+			// back to source chain
 			let token_swap_state = wait_until_token_swap_state_is_changed(
 				&source_client,
 				&token_swap_storage_key,
@@ -275,32 +291,37 @@ impl SwapTokens {
 			let is_transfer_succeeded = match token_swap_state {
 				Some(bp_token_swap::TokenSwapState::Started) => {
 					unreachable!("wait_until_token_swap_state_is_changed only returns if state is not Started; qed",)
-				}
-				None => return Err(anyhow::format_err!("Fresh token swap has disappeared unexpectedly")),
+				},
+				None =>
+					return Err(anyhow::format_err!("Fresh token swap has disappeared unexpectedly")),
 				Some(bp_token_swap::TokenSwapState::Confirmed) => {
 					log::info!(
 						target: "bridge",
 						"Transfer has been successfully dispatched at the target chain. Swap can be claimed",
 					);
 					true
-				}
+				},
 				Some(bp_token_swap::TokenSwapState::Failed) => {
 					log::info!(
 						target: "bridge",
 						"Transfer has been dispatched with an error at the target chain. Swap can be canceled",
 					);
 					false
-				}
+				},
 			};
 
-			// by this time: (1) token swap account has been created and (2) if transfer has been successfully
-			// dispatched, both target chain balances have changed
-			let intermediate_balances = read_account_balances(&accounts, &source_client, &target_client).await?;
+			// by this time: (1) token swap account has been created and (2) if transfer has been
+			// successfully dispatched, both target chain balances have changed
+			let intermediate_balances =
+				read_account_balances(&accounts, &source_client, &target_client).await?;
 			log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances);
 
-			// transfer has been dispatched, but we may need to wait until block where swap can be claimed/canceled
-			if let bp_token_swap::TokenSwapType::LockClaimUntilBlock(ref last_available_block_number, _) =
-				token_swap.swap_type
+			// transfer has been dispatched, but we may need to wait until block where swap can be
+			// claimed/canceled
+			if let bp_token_swap::TokenSwapType::LockClaimUntilBlock(
+				ref last_available_block_number,
+				_,
+			) = token_swap.swap_type
 			{
 				wait_until_swap_unlocked(
 					&source_client,
@@ -317,7 +338,8 @@ impl SwapTokens {
 				log::info!(target: "bridge", "Claiming the swap swap");
 
 				// prepare `claim_swap` message that will be sent over the bridge
-				let claim_swap_call: CallOf<Source> = pallet_bridge_token_swap::Call::claim_swap(token_swap).into();
+				let claim_swap_call: CallOf<Source> =
+					pallet_bridge_token_swap::Call::claim_swap(token_swap).into();
 				let claim_swap_message = bp_message_dispatch::MessagePayload {
 					spec_version: SOURCE_SPEC_VERSION,
 					weight: claim_swap_call.get_dispatch_info().weight,
@@ -354,7 +376,10 @@ impl SwapTokens {
 										target_genesis_hash,
 										&target_sign,
 										relay_substrate_client::TransactionEra::immortal(),
-										UnsignedTransaction::new(send_message_call, transaction_nonce),
+										UnsignedTransaction::new(
+											send_message_call,
+											transaction_nonce,
+										),
 									)
 									.encode(),
 								)
@@ -374,7 +399,7 @@ impl SwapTokens {
 				if token_swap_state != None {
 					return Err(anyhow::format_err!(
 						"Confirmed token swap state has been changed to {:?} unexpectedly"
-					));
+					))
 				}
 			} else {
 				log::info!(target: "bridge", "Cancelling the swap");
@@ -390,7 +415,10 @@ impl SwapTokens {
 										source_genesis_hash,
 										&source_sign,
 										relay_substrate_client::TransactionEra::immortal(),
-										UnsignedTransaction::new(cancel_swap_call, transaction_nonce),
+										UnsignedTransaction::new(
+											cancel_swap_call,
+											transaction_nonce,
+										),
 									)
 									.encode(),
 								)
@@ -402,7 +430,8 @@ impl SwapTokens {
 			}
 
 			// print final balances
-			let final_balances = read_account_balances(&accounts, &source_client, &target_client).await?;
+			let final_balances =
+				read_account_balances(&accounts, &source_client, &target_client).await?;
 			log::info!(target: "bridge", "Final account balances: {:?}", final_balances);
 
 			Ok(())
@@ -454,22 +483,18 @@ impl SwapTokens {
 		source_client: &Client<Source>,
 	) -> anyhow::Result<bp_token_swap::TokenSwapType<BlockNumberOf<Source>>> {
 		match self.swap_type {
-			TokenSwapType::NoLock => Ok(bp_token_swap::TokenSwapType::TemporaryTargetAccountAtBridgedChain),
-			TokenSwapType::LockUntilBlock {
-				blocks_before_expire,
-				ref swap_nonce,
-			} => {
+			TokenSwapType::NoLock =>
+				Ok(bp_token_swap::TokenSwapType::TemporaryTargetAccountAtBridgedChain),
+			TokenSwapType::LockUntilBlock { blocks_before_expire, ref swap_nonce } => {
 				let blocks_before_expire: BlockNumberOf<Source> = blocks_before_expire.into();
 				let current_source_block_number = *source_client.best_header().await?.number();
 				Ok(bp_token_swap::TokenSwapType::LockClaimUntilBlock(
 					current_source_block_number + blocks_before_expire,
 					swap_nonce.unwrap_or_else(|| {
-						U256::from(random::<u128>())
-							.overflowing_mul(U256::from(random::<u128>()))
-							.0
+						U256::from(random::<u128>()).overflowing_mul(U256::from(random::<u128>())).0
 					}),
 				))
-			}
+			},
 		}
 	}
 }
@@ -551,17 +576,16 @@ async fn wait_until_transaction_is_finalized<C: Chain>(
 	loop {
 		let transaction_status = subscription.next().await?;
 		match transaction_status {
-			Some(TransactionStatusOf::<C>::FinalityTimeout(_))
-			| Some(TransactionStatusOf::<C>::Usurped(_))
-			| Some(TransactionStatusOf::<C>::Dropped)
-			| Some(TransactionStatusOf::<C>::Invalid)
-			| None => {
+			Some(TransactionStatusOf::<C>::FinalityTimeout(_)) |
+			Some(TransactionStatusOf::<C>::Usurped(_)) |
+			Some(TransactionStatusOf::<C>::Dropped) |
+			Some(TransactionStatusOf::<C>::Invalid) |
+			None =>
 				return Err(anyhow::format_err!(
 					"We've been waiting for finalization of {} transaction, but it now has the {:?} status",
 					C::NAME,
 					transaction_status,
-				))
-			}
+				)),
 			Some(TransactionStatusOf::<C>::Finalized(block_hash)) => {
 				log::trace!(
 					target: "bridge",
@@ -569,8 +593,8 @@ async fn wait_until_transaction_is_finalized<C: Chain>(
 					C::NAME,
 					block_hash,
 				);
-				return Ok(block_hash);
-			}
+				return Ok(block_hash)
+			},
 			_ => {
 				log::trace!(
 					target: "bridge",
@@ -578,7 +602,7 @@ async fn wait_until_transaction_is_finalized<C: Chain>(
 					C::NAME,
 					transaction_status,
 				);
-			}
+			},
 		}
 	}
 }
@@ -597,9 +621,10 @@ async fn wait_until_token_swap_state_is_changed<C: Chain>(
 		let best_block_hash = client.block_hash_by_number(best_block).await?;
 		log::trace!(target: "bridge", "Inspecting {} block {}/{}", C::NAME, best_block, best_block_hash);
 
-		let token_swap_state = read_token_swap_state(client, best_block_hash, swap_state_storage_key).await?;
+		let token_swap_state =
+			read_token_swap_state(client, best_block_hash, swap_state_storage_key).await?;
 		match token_swap_state {
-			Some(new_token_swap_state) if new_token_swap_state == previous_token_swap_state => {}
+			Some(new_token_swap_state) if new_token_swap_state == previous_token_swap_state => {},
 			_ => {
 				log::trace!(
 					target: "bridge",
@@ -607,8 +632,8 @@ async fn wait_until_token_swap_state_is_changed<C: Chain>(
 					previous_token_swap_state,
 					token_swap_state,
 				);
-				return Ok(token_swap_state);
-			}
+				return Ok(token_swap_state)
+			},
 		}
 	}
 }
@@ -625,7 +650,7 @@ async fn wait_until_swap_unlocked<C: Chain>(
 		let best_block = client.best_finalized_header_number().await?;
 		let best_block_hash = client.block_hash_by_number(best_block).await?;
 		if best_block >= required_block_number {
-			return Ok(());
+			return Ok(())
 		}
 
 		log::trace!(target: "bridge", "Skipping {} block {}/{}", C::NAME, best_block, best_block_hash);
@@ -638,7 +663,5 @@ async fn read_token_swap_state<C: Chain>(
 	at_block: C::Hash,
 	swap_state_storage_key: &StorageKey,
 ) -> anyhow::Result<Option<bp_token_swap::TokenSwapState>> {
-	Ok(client
-		.storage_value(swap_state_storage_key.clone(), Some(at_block))
-		.await?)
+	Ok(client.storage_value(swap_state_storage_key.clone(), Some(at_block)).await?)
 }
diff --git a/bridges/relays/client-ethereum/src/client.rs b/bridges/relays/client-ethereum/src/client.rs
index c82dddcf36ee8bacd73b5b4da97bf24242a9ef51..48b7c9386f353a62a4d4388862d4c930a7c02c3e 100644
--- a/bridges/relays/client-ethereum/src/client.rs
+++ b/bridges/relays/client-ethereum/src/client.rs
@@ -14,12 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::rpc::Ethereum;
-use crate::types::{
-	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx, SyncState, Transaction,
-	TransactionHash, H256, U256,
+use crate::{
+	rpc::Ethereum,
+	types::{
+		Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx,
+		SyncState, Transaction, TransactionHash, H256, U256,
+	},
+	ConnectionParams, Error, Result,
 };
-use crate::{ConnectionParams, Error, Result};
 
 use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder};
 use relay_utils::relay_loop::RECONNECT_DELAY;
@@ -57,15 +59,17 @@ impl Client {
 		}
 	}
 
-	/// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been established
-	/// or error otherwise.
+	/// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been
+	/// established or error otherwise.
 	pub async fn try_connect(params: ConnectionParams) -> Result<Self> {
 		let (tokio, client) = Self::build_client(&params).await?;
 		Ok(Self { tokio, client, params })
 	}
 
 	/// Build client to use in connection.
-	async fn build_client(params: &ConnectionParams) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
+	async fn build_client(
+		params: &ConnectionParams,
+	) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
 		let tokio = tokio::runtime::Runtime::new()?;
 		let uri = format!("ws://{}:{}", params.host, params.port);
 		let client = tokio
@@ -90,13 +94,14 @@ impl Client {
 			match Ethereum::syncing(&*client).await? {
 				SyncState::NotSyncing => Ok(()),
 				SyncState::Syncing(syncing) => {
-					let missing_headers = syncing.highest_block.saturating_sub(syncing.current_block);
+					let missing_headers =
+						syncing.highest_block.saturating_sub(syncing.current_block);
 					if missing_headers > MAJOR_SYNC_BLOCKS.into() {
-						return Err(Error::ClientNotSynced(missing_headers));
+						return Err(Error::ClientNotSynced(missing_headers))
 					}
 
 					Ok(())
-				}
+				},
 			}
 		})
 		.await
@@ -104,21 +109,26 @@ impl Client {
 
 	/// Estimate gas usage for the given call.
 	pub async fn estimate_gas(&self, call_request: CallRequest) -> Result<U256> {
-		self.jsonrpsee_execute(move |client| async move { Ok(Ethereum::estimate_gas(&*client, call_request).await?) })
-			.await
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::estimate_gas(&*client, call_request).await?)
+		})
+		.await
 	}
 
 	/// Retrieve number of the best known block from the Ethereum node.
 	pub async fn best_block_number(&self) -> Result<u64> {
-		self.jsonrpsee_execute(move |client| async move { Ok(Ethereum::block_number(&*client).await?.as_u64()) })
-			.await
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::block_number(&*client).await?.as_u64())
+		})
+		.await
 	}
 
 	/// Retrieve number of the best known block from the Ethereum node.
 	pub async fn header_by_number(&self, block_number: u64) -> Result<Header> {
 		self.jsonrpsee_execute(move |client| async move {
 			let get_full_tx_objects = false;
-			let header = Ethereum::get_block_by_number(&*client, block_number, get_full_tx_objects).await?;
+			let header =
+				Ethereum::get_block_by_number(&*client, block_number, get_full_tx_objects).await?;
 			match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
 				true => Ok(header),
 				false => Err(Error::IncompleteHeader),
@@ -141,19 +151,28 @@ impl Client {
 	}
 
 	/// Retrieve block header and its transactions by its number from Ethereum node.
-	pub async fn header_by_number_with_transactions(&self, number: u64) -> Result<HeaderWithTransactions> {
+	pub async fn header_by_number_with_transactions(
+		&self,
+		number: u64,
+	) -> Result<HeaderWithTransactions> {
 		self.jsonrpsee_execute(move |client| async move {
 			let get_full_tx_objects = true;
-			let header = Ethereum::get_block_by_number_with_transactions(&*client, number, get_full_tx_objects).await?;
-
-			let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
+			let header = Ethereum::get_block_by_number_with_transactions(
+				&*client,
+				number,
+				get_full_tx_objects,
+			)
+			.await?;
+
+			let is_complete_header =
+				header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
 			if !is_complete_header {
-				return Err(Error::IncompleteHeader);
+				return Err(Error::IncompleteHeader)
 			}
 
 			let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
 			if !is_complete_transactions {
-				return Err(Error::IncompleteTransaction);
+				return Err(Error::IncompleteTransaction)
 			}
 
 			Ok(header)
@@ -162,19 +181,25 @@ impl Client {
 	}
 
 	/// Retrieve block header and its transactions by its hash from Ethereum node.
-	pub async fn header_by_hash_with_transactions(&self, hash: H256) -> Result<HeaderWithTransactions> {
+	pub async fn header_by_hash_with_transactions(
+		&self,
+		hash: H256,
+	) -> Result<HeaderWithTransactions> {
 		self.jsonrpsee_execute(move |client| async move {
 			let get_full_tx_objects = true;
-			let header = Ethereum::get_block_by_hash_with_transactions(&*client, hash, get_full_tx_objects).await?;
+			let header =
+				Ethereum::get_block_by_hash_with_transactions(&*client, hash, get_full_tx_objects)
+					.await?;
 
-			let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
+			let is_complete_header =
+				header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
 			if !is_complete_header {
-				return Err(Error::IncompleteHeader);
+				return Err(Error::IncompleteHeader)
 			}
 
 			let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
 			if !is_complete_transactions {
-				return Err(Error::IncompleteTransaction);
+				return Err(Error::IncompleteTransaction)
 			}
 
 			Ok(header)
@@ -184,8 +209,10 @@ impl Client {
 
 	/// Retrieve transaction by its hash from Ethereum node.
 	pub async fn transaction_by_hash(&self, hash: H256) -> Result<Option<Transaction>> {
-		self.jsonrpsee_execute(move |client| async move { Ok(Ethereum::transaction_by_hash(&*client, hash).await?) })
-			.await
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::transaction_by_hash(&*client, hash).await?)
+		})
+		.await
 	}
 
 	/// Retrieve transaction receipt by transaction hash.
@@ -198,9 +225,9 @@ impl Client {
 
 	/// Get the nonce of the given account.
 	pub async fn account_nonce(&self, address: Address) -> Result<U256> {
-		self.jsonrpsee_execute(
-			move |client| async move { Ok(Ethereum::get_transaction_count(&*client, address).await?) },
-		)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::get_transaction_count(&*client, address).await?)
+		})
 		.await
 	}
 
@@ -219,8 +246,10 @@ impl Client {
 
 	/// Call Ethereum smart contract.
 	pub async fn eth_call(&self, call_transaction: CallRequest) -> Result<Bytes> {
-		self.jsonrpsee_execute(move |client| async move { Ok(Ethereum::call(&*client, call_transaction).await?) })
-			.await
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::call(&*client, call_transaction).await?)
+		})
+		.await
 	}
 
 	/// Execute jsonrpsee future in tokio context.
@@ -231,8 +260,6 @@ impl Client {
 		T: Send + 'static,
 	{
 		let client = self.client.clone();
-		self.tokio
-			.spawn(async move { make_jsonrpsee_future(client).await })
-			.await?
+		self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await?
 	}
 }
diff --git a/bridges/relays/client-ethereum/src/error.rs b/bridges/relays/client-ethereum/src/error.rs
index 3650fd2fd9aad44c22ff1d66afb59d410eda9f3b..f3c832cd40066189e94257703ea27312e0196af8 100644
--- a/bridges/relays/client-ethereum/src/error.rs
+++ b/bridges/relays/client-ethereum/src/error.rs
@@ -73,10 +73,10 @@ impl MaybeConnectionError for Error {
 	fn is_connection_error(&self) -> bool {
 		matches!(
 			*self,
-			Error::RpcError(RpcError::Transport(_))
-				| Error::RpcError(RpcError::Internal(_))
-				| Error::RpcError(RpcError::RestartNeeded(_))
-				| Error::ClientNotSynced(_),
+			Error::RpcError(RpcError::Transport(_)) |
+				Error::RpcError(RpcError::Internal(_)) |
+				Error::RpcError(RpcError::RestartNeeded(_)) |
+				Error::ClientNotSynced(_),
 		)
 	}
 }
diff --git a/bridges/relays/client-ethereum/src/lib.rs b/bridges/relays/client-ethereum/src/lib.rs
index 9443a5d24f416a6142fe38ac7b88061fdab548ff..fa4877f8e5cfc28b5a47496756463bb4446f02bf 100644
--- a/bridges/relays/client-ethereum/src/lib.rs
+++ b/bridges/relays/client-ethereum/src/lib.rs
@@ -23,9 +23,11 @@ mod error;
 mod rpc;
 mod sign;
 
-pub use crate::client::Client;
-pub use crate::error::{Error, Result};
-pub use crate::sign::{sign_and_submit_transaction, SigningParams};
+pub use crate::{
+	client::Client,
+	error::{Error, Result},
+	sign::{sign_and_submit_transaction, SigningParams},
+};
 
 pub mod types;
 
@@ -40,9 +42,6 @@ pub struct ConnectionParams {
 
 impl Default for ConnectionParams {
 	fn default() -> Self {
-		ConnectionParams {
-			host: "localhost".into(),
-			port: 8546,
-		}
+		ConnectionParams { host: "localhost".into(), port: 8546 }
 	}
 }
diff --git a/bridges/relays/client-ethereum/src/rpc.rs b/bridges/relays/client-ethereum/src/rpc.rs
index 0fb81f7655a4622acdd0de4ae365f9cc67a4c15b..2479338b1015cac37652119554ff98a5a43737a5 100644
--- a/bridges/relays/client-ethereum/src/rpc.rs
+++ b/bridges/relays/client-ethereum/src/rpc.rs
@@ -17,8 +17,8 @@
 //! Ethereum node RPC interface.
 
 use crate::types::{
-	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, TransactionHash,
-	H256, U256, U64,
+	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction,
+	TransactionHash, H256, U256, U64,
 };
 
 jsonrpsee_proc_macros::rpc_client_api! {
diff --git a/bridges/relays/client-ethereum/src/sign.rs b/bridges/relays/client-ethereum/src/sign.rs
index da1dbc4842e4a86028bdf9b78812d6b7f187d4d9..0d9a8d032ee2ef31ebaedc615de2bbffefe60b8a 100644
--- a/bridges/relays/client-ethereum/src/sign.rs
+++ b/bridges/relays/client-ethereum/src/sign.rs
@@ -14,8 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::types::{Address, CallRequest, U256};
-use crate::{Client, Result};
+use crate::{
+	types::{Address, CallRequest, U256},
+	Client, Result,
+};
 use bp_eth_poa::signatures::{secret_to_address, SignTransaction};
 use hex_literal::hex;
 use secp256k1::SecretKey;
diff --git a/bridges/relays/client-kusama/src/lib.rs b/bridges/relays/client-kusama/src/lib.rs
index 608befaa9a36301c808627e72004f78a88873257..a93726620ff61924e4457ad90da6a2623b2e1ef1 100644
--- a/bridges/relays/client-kusama/src/lib.rs
+++ b/bridges/relays/client-kusama/src/lib.rs
@@ -18,7 +18,8 @@
 
 use codec::Encode;
 use relay_substrate_client::{
-	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, UnsignedTransaction,
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
 };
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
@@ -75,7 +76,13 @@ impl TransactionSignScheme for Kusama {
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::new(
 			unsigned.call,
-			bp_kusama::SignedExtensions::new(bp_kusama::VERSION, era, genesis_hash, unsigned.nonce, unsigned.tip),
+			bp_kusama::SignedExtensions::new(
+				bp_kusama::VERSION,
+				era,
+				genesis_hash,
+				unsigned.nonce,
+				unsigned.tip,
+			),
 		)
 		.expect("SignedExtension never fails.");
 
@@ -98,17 +105,15 @@ impl TransactionSignScheme for Kusama {
 	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
 		tx.signature
 			.as_ref()
-			.map(|(address, _, _)| *address == bp_kusama::AccountId::from(*signer.public().as_array_ref()).into())
+			.map(|(address, _, _)| {
+				*address == bp_kusama::AccountId::from(*signer.public().as_array_ref()).into()
+			})
 			.unwrap_or(false)
 	}
 
 	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
 		let extra = &tx.signature.as_ref()?.2;
-		Some(UnsignedTransaction {
-			call: tx.function,
-			nonce: extra.nonce(),
-			tip: extra.tip(),
-		})
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
 	}
 }
 
diff --git a/bridges/relays/client-kusama/src/runtime.rs b/bridges/relays/client-kusama/src/runtime.rs
index f2145f5b02ff17ad2fc58732f086abfe221e4b9d..702ce3d385466581309de745ee58c54f465a1b77 100644
--- a/bridges/relays/client-kusama/src/runtime.rs
+++ b/bridges/relays/client-kusama/src/runtime.rs
@@ -29,8 +29,8 @@ pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
 /// Polkadot account ownership digest from Kusama.
 ///
 /// The byte vector returned by this function should be signed with a Polkadot account private key.
-/// This way, the owner of `kusama_account_id` on Kusama proves that the Polkadot account private key
-/// is also under his control.
+/// This way, the owner of `kusama_account_id` on Kusama proves that the Polkadot account private
+/// key is also under his control.
 pub fn kusama_to_polkadot_account_ownership_digest<Call, AccountId, SpecVersion>(
 	polkadot_call: &Call,
 	kusama_account_id: AccountId,
@@ -128,7 +128,9 @@ pub enum BridgePolkadotMessagesCall {
 	),
 	#[codec(index = 6)]
 	receive_messages_delivery_proof(
-		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<bp_polkadot::Hash>,
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_polkadot::Hash,
+		>,
 		UnrewardedRelayersState,
 	),
 }
diff --git a/bridges/relays/client-millau/src/lib.rs b/bridges/relays/client-millau/src/lib.rs
index aa20fe76cb7654c84adce966657184fab31c9761..3f1aba1f3b372493b26d6217f206dbac66695bda 100644
--- a/bridges/relays/client-millau/src/lib.rs
+++ b/bridges/relays/client-millau/src/lib.rs
@@ -18,8 +18,8 @@
 
 use codec::{Compact, Decode, Encode};
 use relay_substrate_client::{
-	BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf, TransactionSignScheme,
-	UnsignedTransaction,
+	BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf,
+	TransactionSignScheme, UnsignedTransaction,
 };
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
@@ -100,7 +100,12 @@ impl TransactionSignScheme for Millau {
 		let signer: sp_runtime::MultiSigner = signer.public().into();
 		let (call, extra, _) = raw_payload.deconstruct();
 
-		millau_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra)
+		millau_runtime::UncheckedExtrinsic::new_signed(
+			call,
+			signer.into_account(),
+			signature.into(),
+			extra,
+		)
 	}
 
 	fn is_signed(tx: &Self::SignedTransaction) -> bool {
@@ -110,7 +115,9 @@ impl TransactionSignScheme for Millau {
 	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
 		tx.signature
 			.as_ref()
-			.map(|(address, _, _)| *address == millau_runtime::Address::from(*signer.public().as_array_ref()))
+			.map(|(address, _, _)| {
+				*address == millau_runtime::Address::from(*signer.public().as_array_ref())
+			})
 			.unwrap_or(false)
 	}
 
@@ -118,9 +125,7 @@ impl TransactionSignScheme for Millau {
 		let extra = &tx.signature.as_ref()?.2;
 		Some(UnsignedTransaction {
 			call: tx.function,
-			nonce: Compact::<IndexOf<Self::Chain>>::decode(&mut &extra.4.encode()[..])
-				.ok()?
-				.into(),
+			nonce: Compact::<IndexOf<Self::Chain>>::decode(&mut &extra.4.encode()[..]).ok()?.into(),
 			tip: Compact::<BalanceOf<Self::Chain>>::decode(&mut &extra.6.encode()[..])
 				.ok()?
 				.into(),
diff --git a/bridges/relays/client-polkadot/src/lib.rs b/bridges/relays/client-polkadot/src/lib.rs
index 3ba84e05bff717de70b214b57891865a2d87f801..e6ceabf583e0bfa3e27ebbce9641d57340cbb94d 100644
--- a/bridges/relays/client-polkadot/src/lib.rs
+++ b/bridges/relays/client-polkadot/src/lib.rs
@@ -18,7 +18,8 @@
 
 use codec::Encode;
 use relay_substrate_client::{
-	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, UnsignedTransaction,
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
 };
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
@@ -75,7 +76,13 @@ impl TransactionSignScheme for Polkadot {
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::new(
 			unsigned.call,
-			bp_polkadot::SignedExtensions::new(bp_polkadot::VERSION, era, genesis_hash, unsigned.nonce, unsigned.tip),
+			bp_polkadot::SignedExtensions::new(
+				bp_polkadot::VERSION,
+				era,
+				genesis_hash,
+				unsigned.nonce,
+				unsigned.tip,
+			),
 		)
 		.expect("SignedExtension never fails.");
 
@@ -98,17 +105,15 @@ impl TransactionSignScheme for Polkadot {
 	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
 		tx.signature
 			.as_ref()
-			.map(|(address, _, _)| *address == bp_polkadot::AccountId::from(*signer.public().as_array_ref()).into())
+			.map(|(address, _, _)| {
+				*address == bp_polkadot::AccountId::from(*signer.public().as_array_ref()).into()
+			})
 			.unwrap_or(false)
 	}
 
 	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
 		let extra = &tx.signature.as_ref()?.2;
-		Some(UnsignedTransaction {
-			call: tx.function,
-			nonce: extra.nonce(),
-			tip: extra.tip(),
-		})
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
 	}
 }
 
diff --git a/bridges/relays/client-polkadot/src/runtime.rs b/bridges/relays/client-polkadot/src/runtime.rs
index 1c5d42a00c222db7e96b75480d175a82ba14254a..e21b27f6d3dea47e54935ef23aa4f014741dc93c 100644
--- a/bridges/relays/client-polkadot/src/runtime.rs
+++ b/bridges/relays/client-polkadot/src/runtime.rs
@@ -128,7 +128,9 @@ pub enum BridgeKusamaMessagesCall {
 	),
 	#[codec(index = 6)]
 	receive_messages_delivery_proof(
-		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<bp_kusama::Hash>,
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_kusama::Hash,
+		>,
 		UnrewardedRelayersState,
 	),
 }
diff --git a/bridges/relays/client-rialto/src/lib.rs b/bridges/relays/client-rialto/src/lib.rs
index f7ba2ca65cba817b72fa1008f346a2315ebc3e5c..42ed8bce3bd9b432726d7ba138f16668e50ebd6e 100644
--- a/bridges/relays/client-rialto/src/lib.rs
+++ b/bridges/relays/client-rialto/src/lib.rs
@@ -18,8 +18,8 @@
 
 use codec::{Compact, Decode, Encode};
 use relay_substrate_client::{
-	BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf, TransactionSignScheme,
-	UnsignedTransaction,
+	BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf,
+	TransactionSignScheme, UnsignedTransaction,
 };
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
@@ -100,7 +100,12 @@ impl TransactionSignScheme for Rialto {
 		let signer: sp_runtime::MultiSigner = signer.public().into();
 		let (call, extra, _) = raw_payload.deconstruct();
 
-		rialto_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account().into(), signature.into(), extra)
+		rialto_runtime::UncheckedExtrinsic::new_signed(
+			call,
+			signer.into_account().into(),
+			signature.into(),
+			extra,
+		)
 	}
 
 	fn is_signed(tx: &Self::SignedTransaction) -> bool {
@@ -118,9 +123,7 @@ impl TransactionSignScheme for Rialto {
 		let extra = &tx.signature.as_ref()?.2;
 		Some(UnsignedTransaction {
 			call: tx.function,
-			nonce: Compact::<IndexOf<Self::Chain>>::decode(&mut &extra.4.encode()[..])
-				.ok()?
-				.into(),
+			nonce: Compact::<IndexOf<Self::Chain>>::decode(&mut &extra.4.encode()[..]).ok()?.into(),
 			tip: Compact::<BalanceOf<Self::Chain>>::decode(&mut &extra.6.encode()[..])
 				.ok()?
 				.into(),
diff --git a/bridges/relays/client-rococo/src/lib.rs b/bridges/relays/client-rococo/src/lib.rs
index 4d6e3b969c2a1f9a6d11f3eaa59fbea51f72b103..ad61e3cfd6437be5cf2c964d9f3f569beda51bdd 100644
--- a/bridges/relays/client-rococo/src/lib.rs
+++ b/bridges/relays/client-rococo/src/lib.rs
@@ -18,7 +18,8 @@
 
 use codec::Encode;
 use relay_substrate_client::{
-	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, UnsignedTransaction,
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
 };
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
@@ -78,7 +79,13 @@ impl TransactionSignScheme for Rococo {
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::new(
 			unsigned.call,
-			bp_rococo::SignedExtensions::new(bp_rococo::VERSION, era, genesis_hash, unsigned.nonce, unsigned.tip),
+			bp_rococo::SignedExtensions::new(
+				bp_rococo::VERSION,
+				era,
+				genesis_hash,
+				unsigned.nonce,
+				unsigned.tip,
+			),
 		)
 		.expect("SignedExtension never fails.");
 
@@ -101,17 +108,15 @@ impl TransactionSignScheme for Rococo {
 	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
 		tx.signature
 			.as_ref()
-			.map(|(address, _, _)| *address == bp_rococo::AccountId::from(*signer.public().as_array_ref()).into())
+			.map(|(address, _, _)| {
+				*address == bp_rococo::AccountId::from(*signer.public().as_array_ref()).into()
+			})
 			.unwrap_or(false)
 	}
 
 	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
 		let extra = &tx.signature.as_ref()?.2;
-		Some(UnsignedTransaction {
-			call: tx.function,
-			nonce: extra.nonce(),
-			tip: extra.tip(),
-		})
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
 	}
 }
 
diff --git a/bridges/relays/client-rococo/src/runtime.rs b/bridges/relays/client-rococo/src/runtime.rs
index b68aea985e772962abbf61b9ca7aee45c9dbdc84..a82aa81a792ccbc6e2e493ccf4f65fa7d5c0742d 100644
--- a/bridges/relays/client-rococo/src/runtime.rs
+++ b/bridges/relays/client-rococo/src/runtime.rs
@@ -115,7 +115,9 @@ pub enum BridgeMessagesWococoCall {
 	),
 	#[codec(index = 6)]
 	receive_messages_delivery_proof(
-		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<bp_wococo::Hash>,
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_wococo::Hash,
+		>,
 		UnrewardedRelayersState,
 	),
 }
diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs
index 84db77ec6dfd5d97bcc4bca7ddb7ce50f491f0cf..c3d4e30452423e9201fb7e3e4c4b0ea1f97afdfd 100644
--- a/bridges/relays/client-substrate/src/chain.rs
+++ b/bridges/relays/client-substrate/src/chain.rs
@@ -92,11 +92,7 @@ pub struct UnsignedTransaction<C: Chain> {
 impl<C: Chain> UnsignedTransaction<C> {
 	/// Create new unsigned transaction with given call, nonce and zero tip.
 	pub fn new(call: C::Call, nonce: C::Index) -> Self {
-		Self {
-			call,
-			nonce,
-			tip: Zero::zero(),
-		}
+		Self { call, nonce, tip: Zero::zero() }
 	}
 
 	/// Set transaction tip.
diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs
index be483243df0bee2e5c415ed9ccccf6c0852edc79..ca197867bc4b295eec7ac859b51cb32dd4758896 100644
--- a/bridges/relays/client-substrate/src/client.rs
+++ b/bridges/relays/client-substrate/src/client.rs
@@ -16,17 +16,21 @@
 
 //! Substrate node client.
 
-use crate::chain::{Chain, ChainWithBalances, TransactionStatusOf};
-use crate::rpc::Substrate;
-use crate::{ConnectionParams, Error, HashOf, HeaderIdOf, Result};
+use crate::{
+	chain::{Chain, ChainWithBalances, TransactionStatusOf},
+	rpc::Substrate,
+	ConnectionParams, Error, HashOf, HeaderIdOf, Result,
+};
 
 use async_std::sync::{Arc, Mutex};
 use async_trait::async_trait;
 use codec::{Decode, Encode};
 use frame_system::AccountInfo;
 use futures::{SinkExt, StreamExt};
-use jsonrpsee_ws_client::{traits::SubscriptionClient, v2::params::JsonRpcParams, DeserializeOwned};
-use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder};
+use jsonrpsee_ws_client::{
+	traits::SubscriptionClient, v2::params::JsonRpcParams, DeserializeOwned, WsClient as RpcClient,
+	WsClientBuilder as RpcClientBuilder,
+};
 use num_traits::{Bounded, Zero};
 use pallet_balances::AccountData;
 use pallet_transaction_payment::InclusionFee;
@@ -62,9 +66,10 @@ pub struct Client<C: Chain> {
 	client: Arc<RpcClient>,
 	/// Genesis block hash.
 	genesis_hash: HashOf<C>,
-	/// If several tasks are submitting their transactions simultaneously using `submit_signed_extrinsic`
-	/// method, they may get the same transaction nonce. So one of transactions will be rejected
-	/// from the pool. This lock is here to prevent situations like that.
+	/// If several tasks are submitting their transactions simultaneously using
+	/// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of
+	/// transactions will be rejected from the pool. This lock is here to prevent situations like
+	/// that.
 	submit_signed_extrinsic_lock: Arc<Mutex<()>>,
 }
 
@@ -94,9 +99,7 @@ impl<C: Chain> Clone for Client<C> {
 
 impl<C: Chain> std::fmt::Debug for Client<C> {
 	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
-		fmt.debug_struct("Client")
-			.field("genesis_hash", &self.genesis_hash)
-			.finish()
+		fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish()
 	}
 }
 
@@ -130,7 +133,9 @@ impl<C: Chain> Client<C> {
 		let number: C::BlockNumber = Zero::zero();
 		let genesis_hash_client = client.clone();
 		let genesis_hash = tokio
-			.spawn(async move { Substrate::<C>::chain_get_block_hash(&*genesis_hash_client, number).await })
+			.spawn(async move {
+				Substrate::<C>::chain_get_block_hash(&*genesis_hash_client, number).await
+			})
 			.await??;
 
 		Ok(Self {
@@ -143,7 +148,9 @@ impl<C: Chain> Client<C> {
 	}
 
 	/// Build client to use in connection.
-	async fn build_client(params: ConnectionParams) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
+	async fn build_client(
+		params: ConnectionParams,
+	) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
 		let tokio = tokio::runtime::Runtime::new()?;
 		let uri = format!(
 			"{}://{}:{}",
@@ -186,16 +193,15 @@ impl<C: Chain> Client<C> {
 
 	/// Return hash of the best finalized block.
 	pub async fn best_finalized_header_hash(&self) -> Result<C::Hash> {
-		self.jsonrpsee_execute(|client| async move { Ok(Substrate::<C>::chain_get_finalized_head(&*client).await?) })
-			.await
+		self.jsonrpsee_execute(|client| async move {
+			Ok(Substrate::<C>::chain_get_finalized_head(&*client).await?)
+		})
+		.await
 	}
 
 	/// Return number of the best finalized block.
 	pub async fn best_finalized_header_number(&self) -> Result<C::BlockNumber> {
-		Ok(*self
-			.header_by_hash(self.best_finalized_header_hash().await?)
-			.await?
-			.number())
+		Ok(*self.header_by_hash(self.best_finalized_header_hash().await?).await?.number())
 	}
 
 	/// Returns the best Substrate header.
@@ -203,15 +209,17 @@ impl<C: Chain> Client<C> {
 	where
 		C::Header: DeserializeOwned,
 	{
-		self.jsonrpsee_execute(|client| async move { Ok(Substrate::<C>::chain_get_header(&*client, None).await?) })
-			.await
+		self.jsonrpsee_execute(|client| async move {
+			Ok(Substrate::<C>::chain_get_header(&*client, None).await?)
+		})
+		.await
 	}
 
 	/// Get a Substrate block from its hash.
 	pub async fn get_block(&self, block_hash: Option<C::Hash>) -> Result<C::SignedBlock> {
-		self.jsonrpsee_execute(
-			move |client| async move { Ok(Substrate::<C>::chain_get_block(&*client, block_hash).await?) },
-		)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::chain_get_block(&*client, block_hash).await?)
+		})
 		.await
 	}
 
@@ -246,8 +254,10 @@ impl<C: Chain> Client<C> {
 
 	/// Return runtime version.
 	pub async fn runtime_version(&self) -> Result<RuntimeVersion> {
-		self.jsonrpsee_execute(move |client| async move { Ok(Substrate::<C>::state_runtime_version(&*client).await?) })
-			.await
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::state_runtime_version(&*client).await?)
+		})
+		.await
 	}
 
 	/// Read value from runtime storage.
@@ -259,7 +269,9 @@ impl<C: Chain> Client<C> {
 		self.jsonrpsee_execute(move |client| async move {
 			Substrate::<C>::state_get_storage(&*client, storage_key, block_hash)
 				.await?
-				.map(|encoded_value| T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed))
+				.map(|encoded_value| {
+					T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed)
+				})
 				.transpose()
 		})
 		.await
@@ -272,12 +284,14 @@ impl<C: Chain> Client<C> {
 	{
 		self.jsonrpsee_execute(move |client| async move {
 			let storage_key = C::account_info_storage_key(&account);
-			let encoded_account_data = Substrate::<C>::state_get_storage(&*client, storage_key, None)
-				.await?
-				.ok_or(Error::AccountDoesNotExist)?;
-			let decoded_account_data =
-				AccountInfo::<C::Index, AccountData<C::Balance>>::decode(&mut &encoded_account_data.0[..])
-					.map_err(Error::ResponseParseFailed)?;
+			let encoded_account_data =
+				Substrate::<C>::state_get_storage(&*client, storage_key, None)
+					.await?
+					.ok_or(Error::AccountDoesNotExist)?;
+			let decoded_account_data = AccountInfo::<C::Index, AccountData<C::Balance>>::decode(
+				&mut &encoded_account_data.0[..],
+			)
+			.map_err(Error::ResponseParseFailed)?;
 			Ok(decoded_account_data.data.free)
 		})
 		.await
@@ -348,9 +362,8 @@ impl<C: Chain> Client<C> {
 				let subscription = client
 					.subscribe(
 						"author_submitAndWatchExtrinsic",
-						JsonRpcParams::Array(vec![
-							jsonrpsee_types::to_json_value(extrinsic).map_err(|e| Error::RpcError(e.into()))?
-						]),
+						JsonRpcParams::Array(vec![jsonrpsee_types::to_json_value(extrinsic)
+							.map_err(|e| Error::RpcError(e.into()))?]),
 						"author_unwatchExtrinsic",
 					)
 					.await?;
@@ -370,9 +383,9 @@ impl<C: Chain> Client<C> {
 
 	/// Returns pending extrinsics from transaction pool.
 	pub async fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
-		self.jsonrpsee_execute(
-			move |client| async move { Ok(Substrate::<C>::author_pending_extrinsics(&*client).await?) },
-		)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::author_pending_extrinsics(&*client).await?)
+		})
 		.await
 	}
 
@@ -386,9 +399,10 @@ impl<C: Chain> Client<C> {
 			let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string();
 			let data = Bytes((TransactionSource::External, transaction, at_block).encode());
 
-			let encoded_response = Substrate::<C>::state_call(&*client, call, data, Some(at_block)).await?;
-			let validity =
-				TransactionValidity::decode(&mut &encoded_response.0[..]).map_err(Error::ResponseParseFailed)?;
+			let encoded_response =
+				Substrate::<C>::state_call(&*client, call, data, Some(at_block)).await?;
+			let validity = TransactionValidity::decode(&mut &encoded_response.0[..])
+				.map_err(Error::ResponseParseFailed)?;
 
 			Ok(validity)
 		})
@@ -396,9 +410,13 @@ impl<C: Chain> Client<C> {
 	}
 
 	/// Estimate fee that will be spent on given extrinsic.
-	pub async fn estimate_extrinsic_fee(&self, transaction: Bytes) -> Result<InclusionFee<C::Balance>> {
+	pub async fn estimate_extrinsic_fee(
+		&self,
+		transaction: Bytes,
+	) -> Result<InclusionFee<C::Balance>> {
 		self.jsonrpsee_execute(move |client| async move {
-			let fee_details = Substrate::<C>::payment_query_fee_details(&*client, transaction, None).await?;
+			let fee_details =
+				Substrate::<C>::payment_query_fee_details(&*client, transaction, None).await?;
 			let inclusion_fee = fee_details
 				.inclusion_fee
 				.map(|inclusion_fee| InclusionFee {
@@ -406,8 +424,10 @@ impl<C: Chain> Client<C> {
 						.unwrap_or_else(|_| C::Balance::max_value()),
 					len_fee: C::Balance::try_from(inclusion_fee.len_fee.into_u256())
 						.unwrap_or_else(|_| C::Balance::max_value()),
-					adjusted_weight_fee: C::Balance::try_from(inclusion_fee.adjusted_weight_fee.into_u256())
-						.unwrap_or_else(|_| C::Balance::max_value()),
+					adjusted_weight_fee: C::Balance::try_from(
+						inclusion_fee.adjusted_weight_fee.into_u256(),
+					)
+					.unwrap_or_else(|_| C::Balance::max_value()),
 				})
 				.unwrap_or_else(|| InclusionFee {
 					base_fee: Zero::zero(),
@@ -420,12 +440,16 @@ impl<C: Chain> Client<C> {
 	}
 
 	/// Get the GRANDPA authority set at given block.
-	pub async fn grandpa_authorities_set(&self, block: C::Hash) -> Result<OpaqueGrandpaAuthoritiesSet> {
+	pub async fn grandpa_authorities_set(
+		&self,
+		block: C::Hash,
+	) -> Result<OpaqueGrandpaAuthoritiesSet> {
 		self.jsonrpsee_execute(move |client| async move {
 			let call = SUB_API_GRANDPA_AUTHORITIES.to_string();
 			let data = Bytes(Vec::new());
 
-			let encoded_response = Substrate::<C>::state_call(&*client, call, data, Some(block)).await?;
+			let encoded_response =
+				Substrate::<C>::state_call(&*client, call, data, Some(block)).await?;
 			let authority_list = encoded_response.0;
 
 			Ok(authority_list)
@@ -434,7 +458,12 @@ impl<C: Chain> Client<C> {
 	}
 
 	/// Execute runtime call at given block.
-	pub async fn state_call(&self, method: String, data: Bytes, at_block: Option<C::Hash>) -> Result<Bytes> {
+	pub async fn state_call(
+		&self,
+		method: String,
+		data: Bytes,
+		at_block: Option<C::Hash>,
+	) -> Result<Bytes> {
 		self.jsonrpsee_execute(move |client| async move {
 			Substrate::<C>::state_call(&*client, method, data, at_block)
 				.await
@@ -444,7 +473,11 @@ impl<C: Chain> Client<C> {
 	}
 
 	/// Returns storage proof of given storage keys.
-	pub async fn prove_storage(&self, keys: Vec<StorageKey>, at_block: C::Hash) -> Result<StorageProof> {
+	pub async fn prove_storage(
+		&self,
+		keys: Vec<StorageKey>,
+		at_block: C::Hash,
+	) -> Result<StorageProof> {
 		self.jsonrpsee_execute(move |client| async move {
 			Substrate::<C>::state_prove_storage(&*client, keys, Some(at_block))
 				.await
@@ -485,9 +518,7 @@ impl<C: Chain> Client<C> {
 		T: Send + 'static,
 	{
 		let client = self.client.clone();
-		self.tokio
-			.spawn(async move { make_jsonrpsee_future(client).await })
-			.await?
+		self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await?
 	}
 }
 
@@ -508,11 +539,10 @@ impl<T: DeserializeOwned> Subscription<T> {
 	) {
 		loop {
 			match subscription.next().await {
-				Ok(Some(item)) => {
+				Ok(Some(item)) =>
 					if sender.send(Some(item)).await.is_err() {
-						break;
-					}
-				}
+						break
+					},
 				Ok(None) => {
 					log::trace!(
 						target: "bridge",
@@ -521,8 +551,8 @@ impl<T: DeserializeOwned> Subscription<T> {
 						item_type,
 					);
 					let _ = sender.send(None).await;
-					break;
-				}
+					break
+				},
 				Err(e) => {
 					log::trace!(
 						target: "bridge",
@@ -532,8 +562,8 @@ impl<T: DeserializeOwned> Subscription<T> {
 						e,
 					);
 					let _ = sender.send(None).await;
-					break;
-				}
+					break
+				},
 			}
 		}
 	}
diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs
index 187b4a1e633f5063ea5224b6db239c6f69e5c442..d3444516429be581f2f673f85ffede0358927781 100644
--- a/bridges/relays/client-substrate/src/error.rs
+++ b/bridges/relays/client-substrate/src/error.rs
@@ -96,10 +96,10 @@ impl MaybeConnectionError for Error {
 	fn is_connection_error(&self) -> bool {
 		matches!(
 			*self,
-			Error::RpcError(RpcError::Transport(_))
-				| Error::RpcError(RpcError::Internal(_))
-				| Error::RpcError(RpcError::RestartNeeded(_))
-				| Error::ClientNotSynced(_),
+			Error::RpcError(RpcError::Transport(_)) |
+				Error::RpcError(RpcError::Internal(_)) |
+				Error::RpcError(RpcError::RestartNeeded(_)) |
+				Error::ClientNotSynced(_),
 		)
 	}
 }
@@ -110,9 +110,11 @@ impl std::fmt::Display for Error {
 			Self::Io(e) => e.to_string(),
 			Self::RpcError(e) => e.to_string(),
 			Self::ResponseParseFailed(e) => e.to_string(),
-			Self::UninitializedBridgePallet => "The Substrate bridge pallet has not been initialized yet.".into(),
+			Self::UninitializedBridgePallet =>
+				"The Substrate bridge pallet has not been initialized yet.".into(),
 			Self::AccountDoesNotExist => "Account does not exist on the chain".into(),
-			Self::MissingMandatoryCodeEntry => "Mandatory :code: entry is missing from runtime storage".into(),
+			Self::MissingMandatoryCodeEntry =>
+				"Mandatory :code: entry is missing from runtime storage".into(),
 			Self::StorageProofError(e) => format!("Error when parsing storage proof: {:?}", e),
 			Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health),
 			Self::TransactionInvalid(e) => format!("Substrate transaction is invalid: {:?}", e),
diff --git a/bridges/relays/client-substrate/src/finality_source.rs b/bridges/relays/client-substrate/src/finality_source.rs
index 81a98d2f1e9c69cf1e9281e93f38f4b608c2f352..0059429dcb418b54dc5988ea13811b28e39bc2d9 100644
--- a/bridges/relays/client-substrate/src/finality_source.rs
+++ b/bridges/relays/client-substrate/src/finality_source.rs
@@ -16,10 +16,12 @@
 
 //! Default generic implementation of finality source for basic Substrate client.
 
-use crate::chain::{BlockWithJustification, Chain};
-use crate::client::Client;
-use crate::error::Error;
-use crate::sync_header::SyncHeader;
+use crate::{
+	chain::{BlockWithJustification, Chain},
+	client::Client,
+	error::Error,
+	sync_header::SyncHeader,
+};
 
 use async_std::sync::{Arc, Mutex};
 use async_trait::async_trait;
@@ -43,12 +45,11 @@ pub struct FinalitySource<C: Chain, P> {
 
 impl<C: Chain, P> FinalitySource<C, P> {
 	/// Create new headers source using given client.
-	pub fn new(client: Client<C>, maximal_header_number: Option<RequiredHeaderNumberRef<C>>) -> Self {
-		FinalitySource {
-			client,
-			maximal_header_number,
-			_phantom: Default::default(),
-		}
+	pub fn new(
+		client: Client<C>,
+		maximal_header_number: Option<RequiredHeaderNumberRef<C>>,
+	) -> Self {
+		FinalitySource { client, maximal_header_number, _phantom: Default::default() }
 	}
 
 	/// Returns reference to the underlying RPC client.
@@ -122,7 +123,9 @@ where
 
 		let justification = signed_block
 			.justification()
-			.map(|raw_justification| GrandpaJustification::<C::Header>::decode(&mut raw_justification.as_slice()))
+			.map(|raw_justification| {
+				GrandpaJustification::<C::Header>::decode(&mut raw_justification.as_slice())
+			})
 			.transpose()
 			.map_err(Error::ResponseParseFailed)?;
 
@@ -155,11 +158,11 @@ where
 						Ok(j) => j,
 						Err(err) => {
 							log_error(format!("decode failed with error {:?}", err));
-							continue;
-						}
+							continue
+						},
 					};
 
-					return Some((justification, subscription));
+					return Some((justification, subscription))
 				}
 			},
 		)
diff --git a/bridges/relays/client-substrate/src/guard.rs b/bridges/relays/client-substrate/src/guard.rs
index 093cc50f052a6b6ecc13e94ec89ee8098e85396c..b6ec83f98408cea32fe954808a2c94bd36608670 100644
--- a/bridges/relays/client-substrate/src/guard.rs
+++ b/bridges/relays/client-substrate/src/guard.rs
@@ -52,7 +52,10 @@ pub trait Environment<C: ChainWithBalances>: Send + Sync + 'static {
 }
 
 /// Abort when runtime spec version is different from specified.
-pub fn abort_on_spec_version_change<C: ChainWithBalances>(mut env: impl Environment<C>, expected_spec_version: u32) {
+pub fn abort_on_spec_version_change<C: ChainWithBalances>(
+	mut env: impl Environment<C>,
+	expected_spec_version: u32,
+) {
 	async_std::task::spawn(async move {
 		loop {
 			let actual_spec_version = env.runtime_version().await;
@@ -68,7 +71,7 @@ pub fn abort_on_spec_version_change<C: ChainWithBalances>(mut env: impl Environm
 					);
 
 					env.abort().await;
-				}
+				},
 				Err(error) => log::warn!(
 					target: "bridge-guard",
 					"Failed to read {} runtime version: {:?}. Relay may need to be stopped manually",
@@ -83,7 +86,8 @@ pub fn abort_on_spec_version_change<C: ChainWithBalances>(mut env: impl Environm
 }
 
 /// Abort if, during 24 hours, free balance of given account is decreased at least by given value.
-/// Other components may increase (or decrease) balance of account and it WILL affect logic of the guard.
+/// Other components may increase (or decrease) balance of account and it WILL affect logic of the
+/// guard.
 pub fn abort_when_account_balance_decreased<C: ChainWithBalances>(
 	mut env: impl Environment<C>,
 	account_id: C::AccountId,
@@ -129,7 +133,7 @@ pub fn abort_when_account_balance_decreased<C: ChainWithBalances>(
 
 						env.abort().await;
 					}
-				}
+				},
 				Err(error) => {
 					log::warn!(
 						target: "bridge-guard",
@@ -138,7 +142,7 @@ pub fn abort_when_account_balance_decreased<C: ChainWithBalances>(
 						account_id,
 						error,
 					);
-				}
+				},
 			};
 
 			env.sleep(conditions_check_delay::<C>()).await;
@@ -158,9 +162,7 @@ impl<C: ChainWithBalances> Environment<C> for Client<C> {
 	}
 
 	async fn free_native_balance(&mut self, account: C::AccountId) -> Result<C::Balance, String> {
-		Client::<C>::free_native_balance(self, account)
-			.await
-			.map_err(|e| e.to_string())
+		Client::<C>::free_native_balance(self, account).await.map_err(|e| e.to_string())
 	}
 }
 
@@ -196,8 +198,9 @@ mod tests {
 		const STORAGE_PROOF_OVERHEAD: u32 = 0;
 		const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 0;
 
-		type SignedBlock =
-			sp_runtime::generic::SignedBlock<sp_runtime::generic::Block<Self::Header, sp_runtime::OpaqueExtrinsic>>;
+		type SignedBlock = sp_runtime::generic::SignedBlock<
+			sp_runtime::generic::Block<Self::Header, sp_runtime::OpaqueExtrinsic>,
+		>;
 		type Call = ();
 		type WeightToFee = IdentityFee<u32>;
 	}
@@ -257,10 +260,7 @@ mod tests {
 
 			// client responds with wrong version
 			runtime_version_tx
-				.send(RuntimeVersion {
-					spec_version: 42,
-					..Default::default()
-				})
+				.send(RuntimeVersion { spec_version: 42, ..Default::default() })
 				.await
 				.unwrap();
 
@@ -292,10 +292,7 @@ mod tests {
 
 			// client responds with the same version
 			runtime_version_tx
-				.send(RuntimeVersion {
-					spec_version: 42,
-					..Default::default()
-				})
+				.send(RuntimeVersion { spec_version: 42, ..Default::default() })
 				.await
 				.unwrap();
 
diff --git a/bridges/relays/client-substrate/src/headers_source.rs b/bridges/relays/client-substrate/src/headers_source.rs
index 3dfcb220de4530b38db1e7c3a0ca3b68feb51dff..e3839bf2c8ba103cd52335458797d688c7cb4525 100644
--- a/bridges/relays/client-substrate/src/headers_source.rs
+++ b/bridges/relays/client-substrate/src/headers_source.rs
@@ -16,9 +16,11 @@
 
 //! Default generic implementation of headers source for basic Substrate client.
 
-use crate::chain::{BlockWithJustification, Chain};
-use crate::client::Client;
-use crate::error::Error;
+use crate::{
+	chain::{BlockWithJustification, Chain},
+	client::Client,
+	error::Error,
+};
 
 use async_trait::async_trait;
 use headers_relay::{
@@ -38,19 +40,13 @@ pub struct HeadersSource<C: Chain, P> {
 impl<C: Chain, P> HeadersSource<C, P> {
 	/// Create new headers source using given client.
 	pub fn new(client: Client<C>) -> Self {
-		HeadersSource {
-			client,
-			_phantom: Default::default(),
-		}
+		HeadersSource { client, _phantom: Default::default() }
 	}
 }
 
 impl<C: Chain, P> Clone for HeadersSource<C, P> {
 	fn clone(&self) -> Self {
-		HeadersSource {
-			client: self.client.clone(),
-			_phantom: Default::default(),
-		}
+		HeadersSource { client: self.client.clone(), _phantom: Default::default() }
 	}
 }
 
@@ -69,7 +65,12 @@ where
 	C: Chain,
 	C::BlockNumber: relay_utils::BlockNumberBase,
 	C::Header: Into<P::Header>,
-	P: HeadersSyncPipeline<Extra = (), Completion = EncodedJustification, Hash = C::Hash, Number = C::BlockNumber>,
+	P: HeadersSyncPipeline<
+		Extra = (),
+		Completion = EncodedJustification,
+		Hash = C::Hash,
+		Number = C::BlockNumber,
+	>,
 	P::Header: SourceHeader<C::Hash, C::BlockNumber>,
 {
 	async fn best_block_number(&self) -> Result<P::Number, Error> {
@@ -79,22 +80,17 @@ where
 	}
 
 	async fn header_by_hash(&self, hash: P::Hash) -> Result<P::Header, Error> {
-		self.client
-			.header_by_hash(hash)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into)
 	}
 
 	async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Error> {
-		self.client
-			.header_by_number(number)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_number(number).await.map(Into::into).map_err(Into::into)
 	}
 
-	async fn header_completion(&self, id: HeaderIdOf<P>) -> Result<(HeaderIdOf<P>, Option<P::Completion>), Error> {
+	async fn header_completion(
+		&self,
+		id: HeaderIdOf<P>,
+	) -> Result<(HeaderIdOf<P>, Option<P::Completion>), Error> {
 		let hash = id.1;
 		let signed_block = self.client.get_block(Some(hash)).await?;
 		let grandpa_justification = signed_block.justification().cloned();
@@ -102,7 +98,11 @@ where
 		Ok((id, grandpa_justification))
 	}
 
-	async fn header_extra(&self, id: HeaderIdOf<P>, _header: QueuedHeader<P>) -> Result<(HeaderIdOf<P>, ()), Error> {
+	async fn header_extra(
+		&self,
+		id: HeaderIdOf<P>,
+		_header: QueuedHeader<P>,
+	) -> Result<(HeaderIdOf<P>, ()), Error> {
 		Ok((id, ()))
 	}
 }
diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs
index d801569df32fd977c8d043efe203ed85b7af4c4a..634bdcdca196e5c1ba367347f78865ec3f21676c 100644
--- a/bridges/relays/client-substrate/src/lib.rs
+++ b/bridges/relays/client-substrate/src/lib.rs
@@ -31,16 +31,18 @@ pub mod metrics;
 
 use std::time::Duration;
 
-pub use crate::chain::{
-	BlockWithJustification, CallOf, Chain, ChainWithBalances, TransactionSignScheme, TransactionStatusOf,
-	UnsignedTransaction, WeightToFeeOf,
+pub use crate::{
+	chain::{
+		BlockWithJustification, CallOf, Chain, ChainWithBalances, TransactionSignScheme,
+		TransactionStatusOf, UnsignedTransaction, WeightToFeeOf,
+	},
+	client::{Client, OpaqueGrandpaAuthoritiesSet, Subscription},
+	error::{Error, Result},
+	sync_header::SyncHeader,
 };
-pub use crate::client::{Client, OpaqueGrandpaAuthoritiesSet, Subscription};
-pub use crate::error::{Error, Result};
-pub use crate::sync_header::SyncHeader;
 pub use bp_runtime::{
-	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf, IndexOf, SignatureOf,
-	TransactionEra, TransactionEraOf,
+	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf,
+	IndexOf, SignatureOf, TransactionEra, TransactionEraOf,
 };
 
 /// Header id used by the chain.
@@ -59,11 +61,7 @@ pub struct ConnectionParams {
 
 impl Default for ConnectionParams {
 	fn default() -> Self {
-		ConnectionParams {
-			host: "localhost".into(),
-			port: 9944,
-			secure: false,
-		}
+		ConnectionParams { host: "localhost".into(), port: 9944, secure: false }
 	}
 }
 
@@ -73,7 +71,11 @@ impl Default for ConnectionParams {
 /// been mined for this period.
 ///
 /// Returns `None` if mortality period is `None`
-pub fn transaction_stall_timeout(mortality_period: Option<u32>, average_block_interval: Duration) -> Option<Duration> {
+pub fn transaction_stall_timeout(
+	mortality_period: Option<u32>,
+	average_block_interval: Duration,
+) -> Option<Duration> {
 	// 1 extra block for transaction to reach the pool && 1 for relayer to awake after it is mined
-	mortality_period.map(|mortality_period| average_block_interval.saturating_mul(mortality_period + 1 + 1))
+	mortality_period
+		.map(|mortality_period| average_block_interval.saturating_mul(mortality_period + 1 + 1))
 }
diff --git a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs
index 1b9a3f824edc95365a796548ae9833f2d06ad506..f591a7a98105e1ea76e22ca72630670fd9ae8346 100644
--- a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs
+++ b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs
@@ -14,8 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::chain::Chain;
-use crate::client::Client;
+use crate::{chain::Chain, client::Client};
 
 use async_std::sync::{Arc, RwLock};
 use async_trait::async_trait;
@@ -83,7 +82,8 @@ where
 			.await
 			.map(|maybe_storage_value| {
 				maybe_storage_value.or(self.maybe_default_value).map(|storage_value| {
-					storage_value.into_inner().unique_saturated_into() as f64 / T::DIV.unique_saturated_into() as f64
+					storage_value.into_inner().unique_saturated_into() as f64 /
+						T::DIV.unique_saturated_into() as f64
 				})
 			})
 			.map_err(drop);
diff --git a/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs
index 526fe1e048bfcc9f2b77940e2f4829ae35d9c0fc..c3b69c32f5728dfd9b823d24c6410c0c14b7d6a2 100644
--- a/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs
+++ b/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs
@@ -14,12 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::chain::Chain;
-use crate::client::Client;
-use crate::error::Error;
+use crate::{chain::Chain, client::Client, error::Error};
 
 use async_trait::async_trait;
-use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64};
+use relay_utils::metrics::{
+	metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64,
+};
 use sp_core::storage::StorageKey;
 use sp_runtime::traits::Header as HeaderT;
 use sp_storage::well_known_keys::CODE;
@@ -40,10 +40,7 @@ pub struct StorageProofOverheadMetric<C: Chain> {
 
 impl<C: Chain> Clone for StorageProofOverheadMetric<C> {
 	fn clone(&self) -> Self {
-		StorageProofOverheadMetric {
-			client: self.client.clone(),
-			metric: self.metric.clone(),
-		}
+		StorageProofOverheadMetric { client: self.client.clone(), metric: self.metric.clone() }
 	}
 }
 
@@ -73,15 +70,15 @@ impl<C: Chain> StorageProofOverheadMetric<C> {
 			.await?;
 		let storage_proof_size: usize = storage_proof.clone().iter_nodes().map(|n| n.len()).sum();
 
-		let storage_value_reader =
-			bp_runtime::StorageProofChecker::<C::Hasher>::new(*best_header.state_root(), storage_proof)
-				.map_err(Error::StorageProofError)?;
-		let maybe_encoded_storage_value = storage_value_reader
-			.read_value(CODE)
-			.map_err(Error::StorageProofError)?;
-		let encoded_storage_value_size = maybe_encoded_storage_value
-			.ok_or(Error::MissingMandatoryCodeEntry)?
-			.len();
+		let storage_value_reader = bp_runtime::StorageProofChecker::<C::Hasher>::new(
+			*best_header.state_root(),
+			storage_proof,
+		)
+		.map_err(Error::StorageProofError)?;
+		let maybe_encoded_storage_value =
+			storage_value_reader.read_value(CODE).map_err(Error::StorageProofError)?;
+		let encoded_storage_value_size =
+			maybe_encoded_storage_value.ok_or(Error::MissingMandatoryCodeEntry)?.len();
 
 		Ok(storage_proof_size - encoded_storage_value_size)
 	}
diff --git a/bridges/relays/client-wococo/src/lib.rs b/bridges/relays/client-wococo/src/lib.rs
index e6a0c0649b7ddce1a63e9238d8f3844fe98a09af..d61915ec123708580ac117f4ffbabdeddddde0c8 100644
--- a/bridges/relays/client-wococo/src/lib.rs
+++ b/bridges/relays/client-wococo/src/lib.rs
@@ -18,7 +18,8 @@
 
 use codec::Encode;
 use relay_substrate_client::{
-	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme, UnsignedTransaction,
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
 };
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
@@ -78,7 +79,13 @@ impl TransactionSignScheme for Wococo {
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::new(
 			unsigned.call,
-			bp_wococo::SignedExtensions::new(bp_wococo::VERSION, era, genesis_hash, unsigned.nonce, unsigned.tip),
+			bp_wococo::SignedExtensions::new(
+				bp_wococo::VERSION,
+				era,
+				genesis_hash,
+				unsigned.nonce,
+				unsigned.tip,
+			),
 		)
 		.expect("SignedExtension never fails.");
 
@@ -101,17 +108,15 @@ impl TransactionSignScheme for Wococo {
 	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
 		tx.signature
 			.as_ref()
-			.map(|(address, _, _)| *address == bp_wococo::AccountId::from(*signer.public().as_array_ref()).into())
+			.map(|(address, _, _)| {
+				*address == bp_wococo::AccountId::from(*signer.public().as_array_ref()).into()
+			})
 			.unwrap_or(false)
 	}
 
 	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
 		let extra = &tx.signature.as_ref()?.2;
-		Some(UnsignedTransaction {
-			call: tx.function,
-			nonce: extra.nonce(),
-			tip: extra.tip(),
-		})
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
 	}
 }
 
diff --git a/bridges/relays/client-wococo/src/runtime.rs b/bridges/relays/client-wococo/src/runtime.rs
index daf20156d69e81d0b2c4448d3ef79e1a59ff84d1..c4835372306e5daedcc0720d9292f891e1d285ec 100644
--- a/bridges/relays/client-wococo/src/runtime.rs
+++ b/bridges/relays/client-wococo/src/runtime.rs
@@ -115,7 +115,9 @@ pub enum BridgeMessagesRococoCall {
 	),
 	#[codec(index = 6)]
 	receive_messages_delivery_proof(
-		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<bp_rococo::Hash>,
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_rococo::Hash,
+		>,
 		UnrewardedRelayersState,
 	),
 }
diff --git a/bridges/relays/exchange/src/exchange.rs b/bridges/relays/exchange/src/exchange.rs
index b87b99ee4207edbea5f8c1ccc8b2a460278b8e10..b48a094ee3d5dfe664f1de664665b86773c7c096 100644
--- a/bridges/relays/exchange/src/exchange.rs
+++ b/bridges/relays/exchange/src/exchange.rs
@@ -18,7 +18,8 @@
 
 use async_trait::async_trait;
 use relay_utils::{
-	relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError,
+	relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError,
+	StringifiedMaybeConnectionError,
 };
 use std::{
 	fmt::{Debug, Display},
@@ -96,12 +97,18 @@ pub trait SourceClient<P: TransactionProofPipeline>: RelayClient {
 	async fn block_by_hash(&self, hash: BlockHashOf<P>) -> Result<P::Block, Self::Error>;
 	/// Get canonical block by number.
 	async fn block_by_number(&self, number: BlockNumberOf<P>) -> Result<P::Block, Self::Error>;
-	/// Return block + index where transaction has been **mined**. May return `Ok(None)` if transaction
-	/// is unknown to the source node.
-	async fn transaction_block(&self, hash: &TransactionHashOf<P>)
-		-> Result<Option<(HeaderId<P>, usize)>, Self::Error>;
+	/// Return block + index where transaction has been **mined**. May return `Ok(None)` if
+	/// transaction is unknown to the source node.
+	async fn transaction_block(
+		&self,
+		hash: &TransactionHashOf<P>,
+	) -> Result<Option<(HeaderId<P>, usize)>, Self::Error>;
 	/// Prepare transaction proof.
-	async fn transaction_proof(&self, block: &P::Block, tx_index: usize) -> Result<P::TransactionProof, Self::Error>;
+	async fn transaction_proof(
+		&self,
+		block: &P::Block,
+		tx_index: usize,
+	) -> Result<P::TransactionProof, Self::Error>;
 }
 
 /// Target client API.
@@ -116,9 +123,13 @@ pub trait TargetClient<P: TransactionProofPipeline>: RelayClient {
 	/// Returns best finalized header id.
 	async fn best_finalized_header_id(&self) -> Result<HeaderId<P>, Self::Error>;
 	/// Returns `Ok(true)` if transaction proof is need to be relayed.
-	async fn filter_transaction_proof(&self, proof: &P::TransactionProof) -> Result<bool, Self::Error>;
+	async fn filter_transaction_proof(
+		&self,
+		proof: &P::TransactionProof,
+	) -> Result<bool, Self::Error>;
 	/// Submits transaction proof to the target node.
-	async fn submit_transaction_proof(&self, proof: P::TransactionProof) -> Result<(), Self::Error>;
+	async fn submit_transaction_proof(&self, proof: P::TransactionProof)
+		-> Result<(), Self::Error>;
 }
 
 /// Block transaction statistics.
@@ -154,27 +165,28 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 	for (source_tx_index, source_tx) in transactions_to_process {
 		let result = async {
 			let source_tx_id = format!("{}/{}", source_block.id().1, source_tx_index);
-			let source_tx_proof =
-				prepare_transaction_proof(source_client, &source_tx_id, source_block, source_tx_index)
-					.await
-					.map_err(|e| (FailedClient::Source, e))?;
+			let source_tx_proof = prepare_transaction_proof(
+				source_client,
+				&source_tx_id,
+				source_block,
+				source_tx_index,
+			)
+			.await
+			.map_err(|e| (FailedClient::Source, e))?;
 
 			let needs_to_be_relayed =
-				target_client
-					.filter_transaction_proof(&source_tx_proof)
-					.await
-					.map_err(|err| {
-						(
-							FailedClient::Target,
-							StringifiedMaybeConnectionError::new(
-								err.is_connection_error(),
-								format!("Transaction filtering has failed with {:?}", err),
-							),
-						)
-					})?;
+				target_client.filter_transaction_proof(&source_tx_proof).await.map_err(|err| {
+					(
+						FailedClient::Target,
+						StringifiedMaybeConnectionError::new(
+							err.is_connection_error(),
+							format!("Transaction filtering has failed with {:?}", err),
+						),
+					)
+				})?;
 
 			if !needs_to_be_relayed {
-				return Ok(false);
+				return Ok(false)
 			}
 
 			relay_ready_transaction_proof(target_client, &source_tx_id, source_tx_proof)
@@ -191,13 +203,14 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 		// Option#1 may seems better, but:
 		// 1) we do not track if transaction is mined (without an error) by the target node;
 		// 2) error could be irrecoverable (e.g. when block is already pruned by bridge module or tx
-		//    has invalid format) && we'll end up in infinite loop of retrying the same transaction proof.
+		//    has invalid format) && we'll end up in infinite loop of retrying the same transaction
+		// proof.
 		//
 		// So we're going with option#2 here (the only exception are connection errors).
 		match result {
 			Ok(false) => {
 				relayed_transactions.processed += 1;
-			}
+			},
 			Ok(true) => {
 				log::info!(
 					target: "bridge",
@@ -209,7 +222,7 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 
 				relayed_transactions.processed += 1;
 				relayed_transactions.relayed += 1;
-			}
+			},
 			Err((failed_client, err)) => {
 				log::error!(
 					target: "bridge",
@@ -226,12 +239,12 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 				);
 
 				if err.is_connection_error() {
-					return Err((failed_client, relayed_transactions));
+					return Err((failed_client, relayed_transactions))
 				}
 
 				relayed_transactions.processed += 1;
 				relayed_transactions.failed += 1;
-			}
+			},
 		}
 	}
 
@@ -245,7 +258,8 @@ pub async fn relay_single_transaction_proof<P: TransactionProofPipeline>(
 	source_tx_hash: TransactionHashOf<P>,
 ) -> Result<(), String> {
 	// wait for transaction and header on source node
-	let (source_header_id, source_tx_index) = wait_transaction_mined(source_client, &source_tx_hash).await?;
+	let (source_header_id, source_tx_index) =
+		wait_transaction_mined(source_client, &source_tx_hash).await?;
 	let source_block = source_client.block_by_hash(source_header_id.1.clone()).await;
 	let source_block = source_block.map_err(|err| {
 		format!(
@@ -302,20 +316,17 @@ async fn relay_ready_transaction_proof<P: TransactionProofPipeline>(
 	source_tx_id: &str,
 	source_tx_proof: P::TransactionProof,
 ) -> Result<(), StringifiedMaybeConnectionError> {
-	target_client
-		.submit_transaction_proof(source_tx_proof)
-		.await
-		.map_err(|err| {
-			StringifiedMaybeConnectionError::new(
-				err.is_connection_error(),
-				format!(
-					"Error submitting transaction {} proof to {} node: {:?}",
-					source_tx_id,
-					P::TARGET_NAME,
-					err,
-				),
-			)
-		})
+	target_client.submit_transaction_proof(source_tx_proof).await.map_err(|err| {
+		StringifiedMaybeConnectionError::new(
+			err.is_connection_error(),
+			format!(
+				"Error submitting transaction {} proof to {} node: {:?}",
+				source_tx_id,
+				P::TARGET_NAME,
+				err,
+			),
+		)
+	})
 }
 
 /// Wait until transaction is mined by source node.
@@ -324,14 +335,15 @@ async fn wait_transaction_mined<P: TransactionProofPipeline>(
 	source_tx_hash: &TransactionHashOf<P>,
 ) -> Result<(HeaderId<P>, usize), String> {
 	loop {
-		let source_header_and_tx = source_client.transaction_block(source_tx_hash).await.map_err(|err| {
-			format!(
-				"Error retrieving transaction {} from {} node: {:?}",
-				source_tx_hash,
-				P::SOURCE_NAME,
-				err,
-			)
-		})?;
+		let source_header_and_tx =
+			source_client.transaction_block(source_tx_hash).await.map_err(|err| {
+				format!(
+					"Error retrieving transaction {} from {} node: {:?}",
+					source_tx_hash,
+					P::SOURCE_NAME,
+					err,
+				)
+			})?;
 		match source_header_and_tx {
 			Some((source_header_id, source_tx)) => {
 				log::info!(
@@ -341,8 +353,8 @@ async fn wait_transaction_mined<P: TransactionProofPipeline>(
 					P::SOURCE_NAME,
 				);
 
-				return Ok((source_header_id, source_tx));
-			}
+				return Ok((source_header_id, source_tx))
+			},
 			None => {
 				log::info!(
 					target: "bridge",
@@ -352,7 +364,7 @@ async fn wait_transaction_mined<P: TransactionProofPipeline>(
 				);
 
 				source_client.tick().await;
-			}
+			},
 		}
 	}
 }
@@ -363,15 +375,16 @@ async fn wait_header_imported<P: TransactionProofPipeline>(
 	source_header_id: &HeaderId<P>,
 ) -> Result<(), String> {
 	loop {
-		let is_header_known = target_client.is_header_known(source_header_id).await.map_err(|err| {
-			format!(
-				"Failed to check existence of header {}/{} on {} node: {:?}",
-				source_header_id.0,
-				source_header_id.1,
-				P::TARGET_NAME,
-				err,
-			)
-		})?;
+		let is_header_known =
+			target_client.is_header_known(source_header_id).await.map_err(|err| {
+				format!(
+					"Failed to check existence of header {}/{} on {} node: {:?}",
+					source_header_id.0,
+					source_header_id.1,
+					P::TARGET_NAME,
+					err,
+				)
+			})?;
 		match is_header_known {
 			true => {
 				log::info!(
@@ -382,8 +395,8 @@ async fn wait_header_imported<P: TransactionProofPipeline>(
 					P::TARGET_NAME,
 				);
 
-				return Ok(());
-			}
+				return Ok(())
+			},
 			false => {
 				log::info!(
 					target: "bridge",
@@ -394,7 +407,7 @@ async fn wait_header_imported<P: TransactionProofPipeline>(
 				);
 
 				target_client.tick().await;
-			}
+			},
 		}
 	}
 }
@@ -405,10 +418,8 @@ async fn wait_header_finalized<P: TransactionProofPipeline>(
 	source_header_id: &HeaderId<P>,
 ) -> Result<(), String> {
 	loop {
-		let is_header_finalized = target_client
-			.is_header_finalized(source_header_id)
-			.await
-			.map_err(|err| {
+		let is_header_finalized =
+			target_client.is_header_finalized(source_header_id).await.map_err(|err| {
 				format!(
 					"Failed to check finality of header {}/{} on {} node: {:?}",
 					source_header_id.0,
@@ -427,8 +438,8 @@ async fn wait_header_finalized<P: TransactionProofPipeline>(
 					P::TARGET_NAME,
 				);
 
-				return Ok(());
-			}
+				return Ok(())
+			},
 			false => {
 				log::info!(
 					target: "bridge",
@@ -439,7 +450,7 @@ async fn wait_header_finalized<P: TransactionProofPipeline>(
 				);
 
 				target_client.tick().await;
-			}
+			},
 		}
 	}
 }
@@ -582,15 +593,22 @@ pub(crate) mod tests {
 			self.data.lock().block.clone()
 		}
 
-		async fn transaction_block(&self, _: &TestTransactionHash) -> Result<Option<(TestHeaderId, usize)>, TestError> {
+		async fn transaction_block(
+			&self,
+			_: &TestTransactionHash,
+		) -> Result<Option<(TestHeaderId, usize)>, TestError> {
 			self.data.lock().transaction_block.clone()
 		}
 
-		async fn transaction_proof(&self, block: &TestBlock, index: usize) -> Result<TestTransactionProof, TestError> {
+		async fn transaction_proof(
+			&self,
+			block: &TestBlock,
+			index: usize,
+		) -> Result<TestTransactionProof, TestError> {
 			let tx_hash = block.1[index].hash();
 			let proof_error = self.data.lock().proofs_to_fail.get(&tx_hash).cloned();
 			if let Some(err) = proof_error {
-				return Err(err);
+				return Err(err)
 			}
 
 			Ok(TestTransactionProof(tx_hash))
@@ -653,19 +671,32 @@ pub(crate) mod tests {
 			self.data.lock().best_finalized_header_id.clone()
 		}
 
-		async fn filter_transaction_proof(&self, proof: &TestTransactionProof) -> Result<bool, TestError> {
+		async fn filter_transaction_proof(
+			&self,
+			proof: &TestTransactionProof,
+		) -> Result<bool, TestError> {
 			Ok(self.data.lock().transactions_to_accept.contains(&proof.0))
 		}
 
-		async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> {
+		async fn submit_transaction_proof(
+			&self,
+			proof: TestTransactionProof,
+		) -> Result<(), TestError> {
 			self.data.lock().submitted_proofs.push(proof);
 			Ok(())
 		}
 	}
 
-	fn ensure_relay_single_success(source: &TestTransactionsSource, target: &TestTransactionsTarget) {
+	fn ensure_relay_single_success(
+		source: &TestTransactionsSource,
+		target: &TestTransactionsTarget,
+	) {
 		assert_eq!(
-			async_std::task::block_on(relay_single_transaction_proof(source, target, test_transaction_hash(0),)),
+			async_std::task::block_on(relay_single_transaction_proof(
+				source,
+				target,
+				test_transaction_hash(0),
+			)),
 			Ok(()),
 		);
 		assert_eq!(
@@ -782,11 +813,7 @@ pub(crate) mod tests {
 		let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed")));
 		let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed")));
 
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.remove(&test_transaction_hash(0));
+		target.data.lock().transactions_to_accept.remove(&test_transaction_hash(0));
 
 		ensure_relay_single_success(&source, &target)
 	}
@@ -814,25 +841,14 @@ pub(crate) mod tests {
 		let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed")));
 
 		// let's only accept tx#1
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.remove(&test_transaction_hash(0));
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.insert(test_transaction_hash(1));
+		target.data.lock().transactions_to_accept.remove(&test_transaction_hash(0));
+		target.data.lock().transactions_to_accept.insert(test_transaction_hash(1));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, Default::default());
 		assert_eq!(
 			relayed_transactions,
-			Ok(RelayedBlockTransactions {
-				processed: 3,
-				relayed: 1,
-				failed: 0,
-			}),
+			Ok(RelayedBlockTransactions { processed: 3, relayed: 1, failed: 0 }),
 		);
 		assert_eq!(
 			target.data.lock().submitted_proofs,
@@ -852,14 +868,11 @@ pub(crate) mod tests {
 			.proofs_to_fail
 			.insert(test_transaction_hash(0), TestError(false));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, Default::default());
 		assert_eq!(
 			relayed_transactions,
-			Ok(RelayedBlockTransactions {
-				processed: 3,
-				relayed: 0,
-				failed: 1,
-			}),
+			Ok(RelayedBlockTransactions { processed: 3, relayed: 0, failed: 1 }),
 		);
 		assert_eq!(target.data.lock().submitted_proofs, vec![],);
 	}
@@ -876,14 +889,11 @@ pub(crate) mod tests {
 			.proofs_to_fail
 			.insert(test_transaction_hash(1), TestError(true));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, Default::default());
 		assert_eq!(
 			relayed_transactions,
-			Err(RelayedBlockTransactions {
-				processed: 1,
-				relayed: 1,
-				failed: 0,
-			}),
+			Err(RelayedBlockTransactions { processed: 1, relayed: 1, failed: 0 }),
 		);
 		assert_eq!(
 			target.data.lock().submitted_proofs,
@@ -893,20 +903,13 @@ pub(crate) mod tests {
 		// now do not fail on tx#2
 		source.data.lock().proofs_to_fail.clear();
 		// and also relay tx#3
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.insert(test_transaction_hash(2));
+		target.data.lock().transactions_to_accept.insert(test_transaction_hash(2));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err());
 		assert_eq!(
 			relayed_transactions,
-			Ok(RelayedBlockTransactions {
-				processed: 3,
-				relayed: 2,
-				failed: 0,
-			}),
+			Ok(RelayedBlockTransactions { processed: 3, relayed: 2, failed: 0 }),
 		);
 		assert_eq!(
 			target.data.lock().submitted_proofs,
diff --git a/bridges/relays/exchange/src/exchange_loop.rs b/bridges/relays/exchange/src/exchange_loop.rs
index 0c13f1b5e525987a6a8069bbfb8b17acd8247a93..2b19a30e2124431edd5eb6789c3da891610d7de2 100644
--- a/bridges/relays/exchange/src/exchange_loop.rs
+++ b/bridges/relays/exchange/src/exchange_loop.rs
@@ -16,11 +16,13 @@
 
 //! Relaying proofs of exchange transactions.
 
-use crate::exchange::{
-	relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient, TargetClient,
-	TransactionProofPipeline,
+use crate::{
+	exchange::{
+		relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient,
+		TargetClient, TransactionProofPipeline,
+	},
+	exchange_loop_metrics::ExchangeLoopMetrics,
 };
-use crate::exchange_loop_metrics::ExchangeLoopMetrics;
 
 use backoff::backoff::Backoff;
 use futures::{future::FutureExt, select};
@@ -58,13 +60,13 @@ pub struct InMemoryStorage<BlockNumber> {
 impl<BlockNumber> InMemoryStorage<BlockNumber> {
 	/// Created new in-memory storage with given best processed block number.
 	pub fn new(best_processed_header_number: BlockNumber) -> Self {
-		InMemoryStorage {
-			best_processed_header_number,
-		}
+		InMemoryStorage { best_processed_header_number }
 	}
 }
 
-impl<BlockNumber: 'static + Clone + Copy + Send + Sync> TransactionProofsRelayStorage for InMemoryStorage<BlockNumber> {
+impl<BlockNumber: 'static + Clone + Copy + Send + Sync> TransactionProofsRelayStorage
+	for InMemoryStorage<BlockNumber>
+{
 	type BlockNumber = BlockNumber;
 
 	fn state(&self) -> TransactionProofsRelayState<BlockNumber> {
@@ -140,12 +142,11 @@ async fn run_until_connection_lost<P: TransactionProofPipeline>(
 
 		if let Err((is_connection_error, failed_client)) = iteration_result {
 			if is_connection_error {
-				return Err(failed_client);
+				return Err(failed_client)
 			}
 
-			let retry_timeout = retry_backoff
-				.next_backoff()
-				.unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY);
+			let retry_timeout =
+				retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY);
 			select! {
 				_ = async_std::task::sleep(retry_timeout).fuse() => {},
 				_ = exit_signal => return Ok(()),
@@ -181,7 +182,7 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 			);
 
 			best_finalized_header_id
-		}
+		},
 		Err(err) => {
 			log::error!(
 				target: "bridge",
@@ -191,14 +192,20 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 				err,
 			);
 
-			return Err((err.is_connection_error(), FailedClient::Target));
-		}
+			return Err((err.is_connection_error(), FailedClient::Target))
+		},
 	};
 
 	loop {
 		// if we already have some finalized block body, try to relay its transactions
 		if let Some((block, relayed_transactions)) = current_finalized_block.take() {
-			let result = relay_block_transactions(source_client, target_client, &block, relayed_transactions).await;
+			let result = relay_block_transactions(
+				source_client,
+				target_client,
+				&block,
+				relayed_transactions,
+			)
+			.await;
 
 			match result {
 				Ok(relayed_transactions) => {
@@ -212,7 +219,8 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 						relayed_transactions.failed,
 					);
 
-					state.best_processed_header_number = state.best_processed_header_number + One::one();
+					state.best_processed_header_number =
+						state.best_processed_header_number + One::one();
 					storage.set_state(state);
 
 					if let Some(exchange_loop_metrics) = exchange_loop_metrics {
@@ -224,11 +232,11 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 					}
 
 					// we have just updated state => proceed to next block retrieval
-				}
+				},
 				Err((failed_client, relayed_transactions)) => {
 					*current_finalized_block = Some((block, relayed_transactions));
-					return Err((true, failed_client));
-				}
+					return Err((true, failed_client))
+				},
 			}
 		}
 
@@ -242,8 +250,8 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 					*current_finalized_block = Some((block, RelayedBlockTransactions::default()));
 
 					// we have received new finalized block => go back to relay its transactions
-					continue;
-				}
+					continue
+				},
 				Err(err) => {
 					log::error!(
 						target: "bridge",
@@ -253,13 +261,13 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 						err,
 					);
 
-					return Err((err.is_connection_error(), FailedClient::Source));
-				}
+					return Err((err.is_connection_error(), FailedClient::Source))
+				},
 			}
 		}
 
 		// there are no any transactions we need to relay => wait for new data
-		return Ok(());
+		return Ok(())
 	}
 }
 
@@ -267,17 +275,16 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 mod tests {
 	use super::*;
 	use crate::exchange::tests::{
-		test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof, TestTransactionsSource,
-		TestTransactionsTarget,
+		test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof,
+		TestTransactionsSource, TestTransactionsTarget,
 	};
 	use futures::{future::FutureExt, stream::StreamExt};
 
 	#[test]
 	fn exchange_loop_is_able_to_relay_proofs() {
-		let storage = InMemoryStorage {
-			best_processed_header_number: 0,
-		};
-		let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed")));
+		let storage = InMemoryStorage { best_processed_header_number: 0 };
+		let target =
+			TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed")));
 		let target_data = target.data.clone();
 		let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded();
 
@@ -295,11 +302,8 @@ mod tests {
 				(true, false) => {
 					data.block = Ok(test_next_block());
 					target_data.lock().best_finalized_header_id = Ok(test_next_block_id());
-					target_data
-						.lock()
-						.transactions_to_accept
-						.insert(test_transaction_hash(1));
-				}
+					target_data.lock().transactions_to_accept.insert(test_transaction_hash(1));
+				},
 				_ => (),
 			}
 		}));
diff --git a/bridges/relays/finality/src/finality_loop.rs b/bridges/relays/finality/src/finality_loop.rs
index 9a2795802d0793f9849c721f648f33e1d7cb462c..adfd3927de89842be054bd42d5fa69543f69d86f 100644
--- a/bridges/relays/finality/src/finality_loop.rs
+++ b/bridges/relays/finality/src/finality_loop.rs
@@ -43,18 +43,19 @@ pub struct FinalitySyncParams {
 	/// `min(source_block_time, target_block_time)`.
 	///
 	/// This parameter may be used to limit transactions rate. Increase the value && you'll get
-	/// infrequent updates => sparse headers => potential slow down of bridge applications, but pallet storage
-	/// won't be super large. Decrease the value to near `source_block_time` and you'll get
-	/// transaction for (almost) every block of the source chain => all source headers will be known
-	/// to the target chain => bridge applications will run faster, but pallet storage may explode
-	/// (but if pruning is there, then it's fine).
+	/// infrequent updates => sparse headers => potential slow down of bridge applications, but
+	/// pallet storage won't be super large. Decrease the value to near `source_block_time` and
+	/// you'll get transaction for (almost) every block of the source chain => all source headers
+	/// will be known to the target chain => bridge applications will run faster, but pallet
+	/// storage may explode (but if pruning is there, then it's fine).
 	pub tick: Duration,
 	/// Number of finality proofs to keep in internal buffer between loop iterations.
 	///
-	/// While in "major syncing" state, we still read finality proofs from the stream. They're stored
-	/// in the internal buffer between loop iterations. When we're close to the tip of the chain, we may
-	/// meet finality delays if headers are not finalized frequently. So instead of waiting for next
-	/// finality proof to appear in the stream, we may use existing proof from that buffer.
+	/// While in "major syncing" state, we still read finality proofs from the stream. They're
+	/// stored in the internal buffer between loop iterations. When we're close to the tip of the
+	/// chain, we may meet finality delays if headers are not finalized frequently. So instead of
+	/// waiting for next finality proof to appear in the stream, we may use existing proof from
+	/// that buffer.
 	pub recent_finality_proofs_limit: usize,
 	/// Timeout before we treat our transactions as lost and restart the whole sync process.
 	pub stall_timeout: Duration,
@@ -89,10 +90,15 @@ pub trait TargetClient<P: FinalitySyncPipeline>: RelayClient {
 	async fn best_finalized_source_block_number(&self) -> Result<P::Number, Self::Error>;
 
 	/// Submit header finality proof.
-	async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), Self::Error>;
+	async fn submit_finality_proof(
+		&self,
+		header: P::Header,
+		proof: P::FinalityProof,
+	) -> Result<(), Self::Error>;
 }
 
-/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop.
+/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
+/// sync loop.
 pub fn metrics_prefix<P: FinalitySyncPipeline>() -> String {
 	format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME)
 }
@@ -127,15 +133,11 @@ pub async fn run<P: FinalitySyncPipeline>(
 /// Unjustified headers container. Ordered by header number.
 pub(crate) type UnjustifiedHeaders<H> = Vec<H>;
 /// Finality proofs container. Ordered by target header number.
-pub(crate) type FinalityProofs<P> = Vec<(
-	<P as FinalitySyncPipeline>::Number,
-	<P as FinalitySyncPipeline>::FinalityProof,
-)>;
+pub(crate) type FinalityProofs<P> =
+	Vec<(<P as FinalitySyncPipeline>::Number, <P as FinalitySyncPipeline>::FinalityProof)>;
 /// Reference to finality proofs container.
-pub(crate) type FinalityProofsRef<'a, P> = &'a [(
-	<P as FinalitySyncPipeline>::Number,
-	<P as FinalitySyncPipeline>::FinalityProof,
-)];
+pub(crate) type FinalityProofsRef<'a, P> =
+	&'a [(<P as FinalitySyncPipeline>::Number, <P as FinalitySyncPipeline>::FinalityProof)];
 
 /// Error that may happen inside finality synchronization loop.
 #[derive(Debug)]
@@ -186,10 +188,7 @@ pub(crate) struct RestartableFinalityProofsStream<S> {
 #[cfg(test)]
 impl<S> From<S> for RestartableFinalityProofsStream<S> {
 	fn from(stream: S) -> Self {
-		RestartableFinalityProofsStream {
-			needs_restart: false,
-			stream: Box::pin(stream),
-		}
+		RestartableFinalityProofsStream { needs_restart: false, stream: Box::pin(stream) }
 	}
 }
 
@@ -260,14 +259,12 @@ async fn run_until_connection_lost<P: FinalitySyncPipeline>(
 				last_transaction = updated_last_transaction;
 				retry_backoff.reset();
 				sync_params.tick
-			}
+			},
 			Err(error) => {
 				log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error);
 				error.fail_if_connection_error()?;
-				retry_backoff
-					.next_backoff()
-					.unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY)
-			}
+				retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY)
+			},
 		};
 		if finality_proofs_stream.needs_restart {
 			log::warn!(target: "bridge", "{} finality proofs stream is being restarted", P::SOURCE_NAME);
@@ -297,10 +294,8 @@ where
 	TC: TargetClient<P>,
 {
 	// read best source headers ids from source and target nodes
-	let best_number_at_source = source_client
-		.best_finalized_block_number()
-		.await
-		.map_err(Error::Source)?;
+	let best_number_at_source =
+		source_client.best_finalized_block_number().await.map_err(Error::Source)?;
 	let best_number_at_target = target_client
 		.best_finalized_source_block_number()
 		.await
@@ -309,7 +304,8 @@ where
 		metrics_sync.update_best_block_at_source(best_number_at_source);
 		metrics_sync.update_best_block_at_target(best_number_at_target);
 	}
-	*state.progress = print_sync_progress::<P>(*state.progress, best_number_at_source, best_number_at_target);
+	*state.progress =
+		print_sync_progress::<P>(*state.progress, best_number_at_source, best_number_at_target);
 
 	// if we have already submitted header, then we just need to wait for it
 	// if we're waiting too much, then we believe our transaction has been lost and restart sync
@@ -324,9 +320,9 @@ where
 				P::TARGET_NAME,
 			);
 
-			return Err(Error::Stalled);
+			return Err(Error::Stalled)
 		} else {
-			return Ok(Some(last_transaction));
+			return Ok(Some(last_transaction))
 		}
 	}
 
@@ -343,10 +339,8 @@ where
 	.await?
 	{
 		Some((header, justification)) => {
-			let new_transaction = Transaction {
-				time: Instant::now(),
-				submitted_header_number: header.number(),
-			};
+			let new_transaction =
+				Transaction { time: Instant::now(), submitted_header_number: header.number() };
 
 			log::debug!(
 				target: "bridge",
@@ -361,7 +355,7 @@ where
 				.await
 				.map_err(Error::Target)?;
 			Ok(Some(new_transaction))
-		}
+		},
 		None => Ok(None),
 	}
 }
@@ -398,15 +392,15 @@ where
 	)
 	.await?;
 	let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof {
-		SelectedFinalityProof::Mandatory(header, finality_proof) => return Ok(Some((header, finality_proof))),
+		SelectedFinalityProof::Mandatory(header, finality_proof) =>
+			return Ok(Some((header, finality_proof))),
 		_ if sync_params.only_mandatory_headers => {
 			// we are not reading finality proofs from the stream, so eventually it'll break
 			// but we don't care about transient proofs at all, so it is acceptable
-			return Ok(None);
-		}
-		SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => {
-			(unjustified_headers, Some((header, finality_proof)))
-		}
+			return Ok(None)
+		},
+		SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) =>
+			(unjustified_headers, Some((header, finality_proof))),
 		SelectedFinalityProof::None(unjustified_headers) => (unjustified_headers, None),
 	};
 
@@ -451,7 +445,11 @@ pub(crate) enum SelectedFinalityProof<Header, FinalityProof> {
 /// Otherwise, `SelectedFinalityProof::None` is returned.
 ///
 /// Unless we have found mandatory header, all missing headers are collected and returned.
-pub(crate) async fn read_missing_headers<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>>(
+pub(crate) async fn read_missing_headers<
+	P: FinalitySyncPipeline,
+	SC: SourceClient<P>,
+	TC: TargetClient<P>,
+>(
 	source_client: &SC,
 	_target_client: &TC,
 	best_number_at_source: P::Number,
@@ -470,17 +468,17 @@ pub(crate) async fn read_missing_headers<P: FinalitySyncPipeline, SC: SourceClie
 		match (is_mandatory, finality_proof) {
 			(true, Some(finality_proof)) => {
 				log::trace!(target: "bridge", "Header {:?} is mandatory", header_number);
-				return Ok(SelectedFinalityProof::Mandatory(header, finality_proof));
-			}
+				return Ok(SelectedFinalityProof::Mandatory(header, finality_proof))
+			},
 			(true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())),
 			(false, Some(finality_proof)) => {
 				log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number);
 				unjustified_headers.clear();
 				selected_finality_proof = Some((header, finality_proof));
-			}
+			},
 			(false, None) => {
 				unjustified_headers.push(header);
-			}
+			},
 		}
 
 		header_number = header_number + One::one();
@@ -493,7 +491,10 @@ pub(crate) async fn read_missing_headers<P: FinalitySyncPipeline, SC: SourceClie
 }
 
 /// Read finality proofs from the stream.
-pub(crate) fn read_finality_proofs_from_stream<P: FinalitySyncPipeline, FPS: Stream<Item = P::FinalityProof>>(
+pub(crate) fn read_finality_proofs_from_stream<
+	P: FinalitySyncPipeline,
+	FPS: Stream<Item = P::FinalityProof>,
+>(
 	finality_proofs_stream: &mut RestartableFinalityProofsStream<FPS>,
 	recent_finality_proofs: &mut FinalityProofs<P>,
 ) {
@@ -506,8 +507,8 @@ pub(crate) fn read_finality_proofs_from_stream<P: FinalitySyncPipeline, FPS: Str
 			Some(Some(finality_proof)) => finality_proof,
 			Some(None) => {
 				finality_proofs_stream.needs_restart = true;
-				break;
-			}
+				break
+			},
 			None => break,
 		};
 
@@ -547,7 +548,7 @@ pub(crate) fn select_better_recent_finality_proof<P: FinalitySyncPipeline>(
 			P::SOURCE_NAME,
 			selected_finality_proof.as_ref().map(|(h, _)| h.number()),
 		);
-		return selected_finality_proof;
+		return selected_finality_proof
 	}
 
 	const NOT_EMPTY_PROOF: &str = "we have checked that the vec is not empty; qed";
@@ -569,7 +570,8 @@ pub(crate) fn select_better_recent_finality_proof<P: FinalitySyncPipeline>(
 	let selected_finality_proof_index = recent_finality_proofs
 		.binary_search_by_key(intersection.end(), |(number, _)| *number)
 		.unwrap_or_else(|index| index.saturating_sub(1));
-	let (selected_header_number, finality_proof) = &recent_finality_proofs[selected_finality_proof_index];
+	let (selected_header_number, finality_proof) =
+		&recent_finality_proofs[selected_finality_proof_index];
 	let has_selected_finality_proof = intersection.contains(selected_header_number);
 	log::trace!(
 		target: "bridge",
@@ -585,7 +587,7 @@ pub(crate) fn select_better_recent_finality_proof<P: FinalitySyncPipeline>(
 		if has_selected_finality_proof { "improved" } else { "failed" },
 	);
 	if !has_selected_finality_proof {
-		return selected_finality_proof;
+		return selected_finality_proof
 	}
 
 	// now remove all obsolete headers and extract selected header
@@ -601,20 +603,15 @@ pub(crate) fn prune_recent_finality_proofs<P: FinalitySyncPipeline>(
 	recent_finality_proofs: &mut FinalityProofs<P>,
 	recent_finality_proofs_limit: usize,
 ) {
-	let position =
-		recent_finality_proofs.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number);
+	let position = recent_finality_proofs
+		.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number);
 
 	// remove all obsolete elements
-	*recent_finality_proofs = recent_finality_proofs.split_off(
-		position
-			.map(|position| position + 1)
-			.unwrap_or_else(|position| position),
-	);
+	*recent_finality_proofs = recent_finality_proofs
+		.split_off(position.map(|position| position + 1).unwrap_or_else(|position| position));
 
 	// now - limit vec by size
-	let split_index = recent_finality_proofs
-		.len()
-		.saturating_sub(recent_finality_proofs_limit);
+	let split_index = recent_finality_proofs.len().saturating_sub(recent_finality_proofs_limit);
 	*recent_finality_proofs = recent_finality_proofs.split_off(split_index);
 }
 
@@ -626,15 +623,15 @@ fn print_sync_progress<P: FinalitySyncPipeline>(
 	let (prev_time, prev_best_number_at_target) = progress_context;
 	let now = Instant::now();
 
-	let need_update = now - prev_time > Duration::from_secs(10)
-		|| prev_best_number_at_target
+	let need_update = now - prev_time > Duration::from_secs(10) ||
+		prev_best_number_at_target
 			.map(|prev_best_number_at_target| {
 				best_number_at_target.saturating_sub(prev_best_number_at_target) > 10.into()
 			})
 			.unwrap_or(true);
 
 	if !need_update {
-		return (prev_time, prev_best_number_at_target);
+		return (prev_time, prev_best_number_at_target)
 	}
 
 	log::info!(
diff --git a/bridges/relays/finality/src/finality_loop_tests.rs b/bridges/relays/finality/src/finality_loop_tests.rs
index e7e0cdb39fb35ed57debd32fe140b40d2d7ca899..915b7ee6766efcd5fdf3f9b62067a80f998bc010 100644
--- a/bridges/relays/finality/src/finality_loop_tests.rs
+++ b/bridges/relays/finality/src/finality_loop_tests.rs
@@ -18,17 +18,21 @@
 
 #![cfg(test)]
 
-use crate::finality_loop::{
-	prune_recent_finality_proofs, read_finality_proofs_from_stream, run, select_better_recent_finality_proof,
-	select_header_to_submit, FinalityProofs, FinalitySyncParams, RestartableFinalityProofsStream, SourceClient,
-	TargetClient,
+use crate::{
+	finality_loop::{
+		prune_recent_finality_proofs, read_finality_proofs_from_stream, run,
+		select_better_recent_finality_proof, select_header_to_submit, FinalityProofs,
+		FinalitySyncParams, RestartableFinalityProofsStream, SourceClient, TargetClient,
+	},
+	FinalityProof, FinalitySyncPipeline, SourceHeader,
 };
-use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader};
 
 use async_trait::async_trait;
 use futures::{FutureExt, Stream, StreamExt};
 use parking_lot::Mutex;
-use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError};
+use relay_utils::{
+	metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError,
+};
 use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration};
 
 type IsMandatory = bool;
@@ -121,10 +125,7 @@ impl SourceClient<TestFinalitySyncPipeline> for TestSourceClient {
 	) -> Result<(TestSourceHeader, Option<TestFinalityProof>), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(&mut *data);
-		data.source_headers
-			.get(&number)
-			.cloned()
-			.ok_or(TestError::NonConnection)
+		data.source_headers.get(&number).cloned().ok_or(TestError::NonConnection)
 	}
 
 	async fn finality_proofs(&self) -> Result<Self::FinalityProofsStream, TestError> {
@@ -157,7 +158,11 @@ impl TargetClient<TestFinalitySyncPipeline> for TestTargetClient {
 		Ok(data.target_best_block_number)
 	}
 
-	async fn submit_finality_proof(&self, header: TestSourceHeader, proof: TestFinalityProof) -> Result<(), TestError> {
+	async fn submit_finality_proof(
+		&self,
+		header: TestSourceHeader,
+		proof: TestFinalityProof,
+	) -> Result<(), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(&mut *data);
 		data.target_best_block_number = header.number();
@@ -171,11 +176,12 @@ fn prepare_test_clients(
 	state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static,
 	source_headers: HashMap<TestNumber, (TestSourceHeader, Option<TestFinalityProof>)>,
 ) -> (TestSourceClient, TestTargetClient) {
-	let internal_state_function: Arc<dyn Fn(&mut ClientsData) + Send + Sync> = Arc::new(move |data| {
-		if state_function(data) {
-			exit_sender.unbounded_send(()).unwrap();
-		}
-	});
+	let internal_state_function: Arc<dyn Fn(&mut ClientsData) + Send + Sync> =
+		Arc::new(move |data| {
+			if state_function(data) {
+				exit_sender.unbounded_send(()).unwrap();
+			}
+		});
 	let clients_data = Arc::new(Mutex::new(ClientsData {
 		source_best_block_number: 10,
 		source_headers,
@@ -189,14 +195,13 @@ fn prepare_test_clients(
 			on_method_call: internal_state_function.clone(),
 			data: clients_data.clone(),
 		},
-		TestTargetClient {
-			on_method_call: internal_state_function,
-			data: clients_data,
-		},
+		TestTargetClient { on_method_call: internal_state_function, data: clients_data },
 	)
 }
 
-fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData {
+fn run_sync_loop(
+	state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static,
+) -> ClientsData {
 	let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded();
 	let (source_client, target_client) = prepare_test_clients(
 		exit_sender,
@@ -234,12 +239,13 @@ fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync
 #[test]
 fn finality_sync_loop_works() {
 	let client_data = run_sync_loop(|data| {
-		// header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, because
-		// header#8 has persistent finality proof && it is mandatory => it is submitted
-		// header#9 has persistent finality proof, but it isn't mandatory => it is submitted, because
-		//   there are no more persistent finality proofs
+		// header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted,
+		// because header#8 has persistent finality proof && it is mandatory => it is submitted
+		// header#9 has persistent finality proof, but it isn't mandatory => it is submitted,
+		// because   there are no more persistent finality proofs
 		//
-		// once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from the stream
+		// once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from
+		// the stream
 		if data.target_best_block_number == 9 {
 			data.source_best_block_number = 14;
 			data.source_headers.insert(11, (TestSourceHeader(false, 11), None));
@@ -287,10 +293,7 @@ fn run_only_mandatory_headers_mode_test(
 		vec![
 			(6, (TestSourceHeader(false, 6), Some(TestFinalityProof(6)))),
 			(7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))),
-			(
-				8,
-				(TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8))),
-			),
+			(8, (TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8)))),
 			(9, (TestSourceHeader(false, 9), Some(TestFinalityProof(9)))),
 			(10, (TestSourceHeader(false, 10), Some(TestFinalityProof(10)))),
 		]
@@ -357,7 +360,8 @@ fn select_better_recent_finality_proof_works() {
 		Some((TestSourceHeader(false, 2), TestFinalityProof(2))),
 	);
 
-	// if there's no intersection between recent finality proofs and unjustified headers, nothing is changed
+	// if there's no intersection between recent finality proofs and unjustified headers, nothing is
+	// changed
 	let mut unjustified_headers = vec![TestSourceHeader(false, 9), TestSourceHeader(false, 10)];
 	assert_eq!(
 		select_better_recent_finality_proof::<TestFinalitySyncPipeline>(
@@ -368,13 +372,10 @@ fn select_better_recent_finality_proof_works() {
 		Some((TestSourceHeader(false, 2), TestFinalityProof(2))),
 	);
 
-	// if there's intersection between recent finality proofs and unjustified headers, but there are no
-	// proofs in this intersection, nothing is changed
-	let mut unjustified_headers = vec![
-		TestSourceHeader(false, 8),
-		TestSourceHeader(false, 9),
-		TestSourceHeader(false, 10),
-	];
+	// if there's intersection between recent finality proofs and unjustified headers, but there are
+	// no proofs in this intersection, nothing is changed
+	let mut unjustified_headers =
+		vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)];
 	assert_eq!(
 		select_better_recent_finality_proof::<TestFinalitySyncPipeline>(
 			&[(7, TestFinalityProof(7)), (11, TestFinalityProof(11))],
@@ -385,22 +386,15 @@ fn select_better_recent_finality_proof_works() {
 	);
 	assert_eq!(
 		unjustified_headers,
-		vec![
-			TestSourceHeader(false, 8),
-			TestSourceHeader(false, 9),
-			TestSourceHeader(false, 10)
-		]
+		vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)]
 	);
 
 	// if there's intersection between recent finality proofs and unjustified headers and there's
 	// a proof in this intersection:
 	// - this better (last from intersection) proof is selected;
 	// - 'obsolete' unjustified headers are pruned.
-	let mut unjustified_headers = vec![
-		TestSourceHeader(false, 8),
-		TestSourceHeader(false, 9),
-		TestSourceHeader(false, 10),
-	];
+	let mut unjustified_headers =
+		vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)];
 	assert_eq!(
 		select_better_recent_finality_proof::<TestFinalitySyncPipeline>(
 			&[(7, TestFinalityProof(7)), (9, TestFinalityProof(9))],
@@ -416,7 +410,10 @@ fn read_finality_proofs_from_stream_works() {
 	// when stream is currently empty, nothing is changed
 	let mut recent_finality_proofs = vec![(1, TestFinalityProof(1))];
 	let mut stream = futures::stream::pending().into();
-	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(&mut stream, &mut recent_finality_proofs);
+	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(
+		&mut stream,
+		&mut recent_finality_proofs,
+	);
 	assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]);
 	assert!(!stream.needs_restart);
 
@@ -424,20 +421,20 @@ fn read_finality_proofs_from_stream_works() {
 	let mut stream = futures::stream::iter(vec![TestFinalityProof(4)])
 		.chain(futures::stream::pending())
 		.into();
-	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(&mut stream, &mut recent_finality_proofs);
-	assert_eq!(
-		recent_finality_proofs,
-		vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]
+	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(
+		&mut stream,
+		&mut recent_finality_proofs,
 	);
+	assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]);
 	assert!(!stream.needs_restart);
 
 	// when stream has ended, we'll need to restart it
 	let mut stream = futures::stream::empty().into();
-	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(&mut stream, &mut recent_finality_proofs);
-	assert_eq!(
-		recent_finality_proofs,
-		vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]
+	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(
+		&mut stream,
+		&mut recent_finality_proofs,
 	);
+	assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]);
 	assert!(stream.needs_restart);
 }
 
diff --git a/bridges/relays/finality/src/lib.rs b/bridges/relays/finality/src/lib.rs
index 64ec5bed05005ff4664660b27feb094bee157675..78ef33f1b376b8d2815f3361e02275ef4efb0bab 100644
--- a/bridges/relays/finality/src/lib.rs
+++ b/bridges/relays/finality/src/lib.rs
@@ -19,7 +19,9 @@
 //! are still submitted to the target node, but are treated as auxiliary data as we are not trying
 //! to submit all source headers to the target node.
 
-pub use crate::finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient};
+pub use crate::finality_loop::{
+	metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient,
+};
 
 use bp_header_chain::FinalityProof;
 use std::fmt::Debug;
diff --git a/bridges/relays/headers/src/headers.rs b/bridges/relays/headers/src/headers.rs
index d4f3d77d79ceaee9c666b0f2836a9218482a26f7..8d67c1cf48574edf17856e06341f9e507438c829 100644
--- a/bridges/relays/headers/src/headers.rs
+++ b/bridges/relays/headers/src/headers.rs
@@ -20,22 +20,33 @@
 //! may stay until source/target chain state isn't updated. When a header reaches the
 //! `ready` sub-queue, it may be submitted to the target chain.
 
-use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader};
+use crate::sync_types::{
+	HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader,
+};
 
 use linked_hash_map::LinkedHashMap;
 use num_traits::{One, Zero};
 use relay_utils::HeaderId;
 use std::{
-	collections::{btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap, HashSet},
+	collections::{
+		btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap,
+		HashSet,
+	},
 	time::{Duration, Instant},
 };
 
-type HeadersQueue<P> =
-	BTreeMap<<P as HeadersSyncPipeline>::Number, HashMap<<P as HeadersSyncPipeline>::Hash, QueuedHeader<P>>>;
-type SyncedChildren<P> =
-	BTreeMap<<P as HeadersSyncPipeline>::Number, HashMap<<P as HeadersSyncPipeline>::Hash, HashSet<HeaderIdOf<P>>>>;
-type KnownHeaders<P> =
-	BTreeMap<<P as HeadersSyncPipeline>::Number, HashMap<<P as HeadersSyncPipeline>::Hash, HeaderStatus>>;
+type HeadersQueue<P> = BTreeMap<
+	<P as HeadersSyncPipeline>::Number,
+	HashMap<<P as HeadersSyncPipeline>::Hash, QueuedHeader<P>>,
+>;
+type SyncedChildren<P> = BTreeMap<
+	<P as HeadersSyncPipeline>::Number,
+	HashMap<<P as HeadersSyncPipeline>::Hash, HashSet<HeaderIdOf<P>>>,
+>;
+type KnownHeaders<P> = BTreeMap<
+	<P as HeadersSyncPipeline>::Number,
+	HashMap<<P as HeadersSyncPipeline>::Hash, HeaderStatus>,
+>;
 
 /// We're trying to fetch completion data for single header at this interval.
 const RETRY_FETCH_COMPLETION_INTERVAL: Duration = Duration::from_secs(20);
@@ -113,35 +124,31 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	pub fn headers_in_status(&self, status: HeaderStatus) -> usize {
 		match status {
 			HeaderStatus::Unknown | HeaderStatus::Synced => 0,
-			HeaderStatus::MaybeOrphan => self
-				.maybe_orphan
-				.values()
-				.fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Orphan => self.orphan.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::MaybeExtra => self
-				.maybe_extra
-				.values()
-				.fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Extra => self.extra.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Ready => self.ready.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Incomplete => self.incomplete.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Submitted => self.submitted.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::MaybeOrphan =>
+				self.maybe_orphan.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Orphan =>
+				self.orphan.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::MaybeExtra =>
+				self.maybe_extra.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Extra =>
+				self.extra.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Ready =>
+				self.ready.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Incomplete =>
+				self.incomplete.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Submitted =>
+				self.submitted.values().fold(0, |total, headers| total + headers.len()),
 		}
 	}
 
 	/// Returns number of headers that are currently in the queue.
 	pub fn total_headers(&self) -> usize {
-		self.maybe_orphan
-			.values()
-			.fold(0, |total, headers| total + headers.len())
-			+ self.orphan.values().fold(0, |total, headers| total + headers.len())
-			+ self
-				.maybe_extra
-				.values()
-				.fold(0, |total, headers| total + headers.len())
-			+ self.extra.values().fold(0, |total, headers| total + headers.len())
-			+ self.ready.values().fold(0, |total, headers| total + headers.len())
-			+ self.incomplete.values().fold(0, |total, headers| total + headers.len())
+		self.maybe_orphan.values().fold(0, |total, headers| total + headers.len()) +
+			self.orphan.values().fold(0, |total, headers| total + headers.len()) +
+			self.maybe_extra.values().fold(0, |total, headers| total + headers.len()) +
+			self.extra.values().fold(0, |total, headers| total + headers.len()) +
+			self.ready.values().fold(0, |total, headers| total + headers.len()) +
+			self.incomplete.values().fold(0, |total, headers| total + headers.len())
 	}
 
 	/// Returns number of best block in the queue.
@@ -157,8 +164,16 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 						std::cmp::max(
 							self.ready.keys().next_back().cloned().unwrap_or_else(Zero::zero),
 							std::cmp::max(
-								self.incomplete.keys().next_back().cloned().unwrap_or_else(Zero::zero),
-								self.submitted.keys().next_back().cloned().unwrap_or_else(Zero::zero),
+								self.incomplete
+									.keys()
+									.next_back()
+									.cloned()
+									.unwrap_or_else(Zero::zero),
+								self.submitted
+									.keys()
+									.next_back()
+									.cloned()
+									.unwrap_or_else(Zero::zero),
 							),
 						),
 					),
@@ -226,7 +241,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 				id,
 				status,
 			);
-			return;
+			return
 		}
 
 		if id.0 < self.prune_border {
@@ -236,7 +251,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 				P::SOURCE_NAME,
 				id,
 			);
-			return;
+			return
 		}
 
 		let parent_id = header.parent_id();
@@ -247,20 +262,20 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			HeaderStatus::Unknown | HeaderStatus::MaybeOrphan => {
 				insert_header(&mut self.maybe_orphan, id, header);
 				HeaderStatus::MaybeOrphan
-			}
+			},
 			HeaderStatus::Orphan => {
 				insert_header(&mut self.orphan, id, header);
 				HeaderStatus::Orphan
-			}
-			HeaderStatus::MaybeExtra
-			| HeaderStatus::Extra
-			| HeaderStatus::Ready
-			| HeaderStatus::Incomplete
-			| HeaderStatus::Submitted
-			| HeaderStatus::Synced => {
+			},
+			HeaderStatus::MaybeExtra |
+			HeaderStatus::Extra |
+			HeaderStatus::Ready |
+			HeaderStatus::Incomplete |
+			HeaderStatus::Submitted |
+			HeaderStatus::Synced => {
 				insert_header(&mut self.maybe_extra, id, header);
 				HeaderStatus::MaybeExtra
-			}
+			},
 		};
 
 		self.known_headers.entry(id.0).or_default().insert(id.1, status);
@@ -288,7 +303,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 				HeaderStatus::Orphan,
 				id,
 			);
-			return;
+			return
 		}
 
 		move_header_descendants::<P>(
@@ -351,8 +366,8 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 					id,
 				);
 
-				return;
-			}
+				return
+			},
 		};
 
 		// do not remove from `incomplete_headers` here, because otherwise we'll miss
@@ -414,14 +429,20 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	}
 
 	/// Marks given headers incomplete.
-	pub fn add_incomplete_headers(&mut self, make_header_incomplete: bool, new_incomplete_headers: Vec<HeaderIdOf<P>>) {
+	pub fn add_incomplete_headers(
+		&mut self,
+		make_header_incomplete: bool,
+		new_incomplete_headers: Vec<HeaderIdOf<P>>,
+	) {
 		for new_incomplete_header in new_incomplete_headers {
 			if make_header_incomplete {
 				self.header_synced(&new_incomplete_header);
 			}
 
-			let move_origins = select_synced_children::<P>(&self.synced_children, &new_incomplete_header);
-			let move_origins = move_origins.into_iter().chain(std::iter::once(new_incomplete_header));
+			let move_origins =
+				select_synced_children::<P>(&self.synced_children, &new_incomplete_header);
+			let move_origins =
+				move_origins.into_iter().chain(std::iter::once(new_incomplete_header));
 			for move_origin in move_origins {
 				move_header_descendants::<P>(
 					&mut [&mut self.ready, &mut self.submitted],
@@ -450,7 +471,9 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 		// are moved from Ready/Submitted to Incomplete queue
 		let new_incomplete_headers = ids
 			.iter()
-			.filter(|id| !self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id))
+			.filter(|id| {
+				!self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id)
+			})
 			.cloned()
 			.collect::<Vec<_>>();
 		self.add_incomplete_headers(true, new_incomplete_headers);
@@ -468,8 +491,10 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			// sub2eth rejects H if H.Parent is incomplete
 			// sub2sub allows 'syncing' headers like that
 			// => let's check if there are some synced children of just completed header
-			let move_origins = select_synced_children::<P>(&self.synced_children, &just_completed_header);
-			let move_origins = move_origins.into_iter().chain(std::iter::once(just_completed_header));
+			let move_origins =
+				select_synced_children::<P>(&self.synced_children, &just_completed_header);
+			let move_origins =
+				move_origins.into_iter().chain(std::iter::once(just_completed_header));
 			for move_origin in move_origins {
 				move_header_descendants::<P>(
 					&mut [&mut self.incomplete],
@@ -500,7 +525,8 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	pub fn incomplete_header(&mut self) -> Option<HeaderIdOf<P>> {
 		queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| {
 			let retry = match *last_fetch_time {
-				Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL,
+				Some(last_fetch_time) =>
+					last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL,
 				None => true,
 			};
 
@@ -521,7 +547,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	/// Prune and never accept headers before this block.
 	pub fn prune(&mut self, prune_border: P::Number) {
 		if prune_border <= self.prune_border {
-			return;
+			return
 		}
 
 		prune_queue(&mut self.maybe_orphan, prune_border);
@@ -570,10 +596,10 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 		match header {
 			Some(header) => {
 				let parent_id = header.header().parent_id();
-				self.incomplete_headers.contains_key(&parent_id)
-					|| self.completion_data.contains_key(&parent_id)
-					|| self.status(&parent_id) == HeaderStatus::Incomplete
-			}
+				self.incomplete_headers.contains_key(&parent_id) ||
+					self.completion_data.contains_key(&parent_id) ||
+					self.status(&parent_id) == HeaderStatus::Incomplete
+			},
 			None => false,
 		}
 	}
@@ -603,12 +629,8 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			.expect("header has a given status; given queue has the header; qed");
 
 			// remember ids of all the children of the current header
-			let synced_children_entry = self
-				.synced_children
-				.entry(current.0)
-				.or_default()
-				.entry(current.1)
-				.or_default();
+			let synced_children_entry =
+				self.synced_children.entry(current.0).or_default().entry(current.1).or_default();
 			let all_queues = [
 				&self.maybe_orphan,
 				&self.orphan,
@@ -624,7 +646,9 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 					.map(|potential_children| {
 						potential_children
 							.values()
-							.filter(|potential_child| potential_child.header().parent_id() == current)
+							.filter(|potential_child| {
+								potential_child.header().parent_id() == current
+							})
 							.map(|child| child.id())
 							.collect::<Vec<_>>()
 					})
@@ -661,12 +685,19 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 }
 
 /// Insert header to the queue.
-fn insert_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: HeaderIdOf<P>, header: QueuedHeader<P>) {
+fn insert_header<P: HeadersSyncPipeline>(
+	queue: &mut HeadersQueue<P>,
+	id: HeaderIdOf<P>,
+	header: QueuedHeader<P>,
+) {
 	queue.entry(id.0).or_default().insert(id.1, header);
 }
 
 /// Remove header from the queue.
-fn remove_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: &HeaderIdOf<P>) -> Option<QueuedHeader<P>> {
+fn remove_header<P: HeadersSyncPipeline>(
+	queue: &mut HeadersQueue<P>,
+	id: &HeaderIdOf<P>,
+) -> Option<QueuedHeader<P>> {
 	let mut headers_at = match queue.entry(id.0) {
 		BTreeMapEntry::Occupied(headers_at) => headers_at,
 		BTreeMapEntry::Vacant(_) => return None,
@@ -680,7 +711,10 @@ fn remove_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: &Heade
 }
 
 /// Get header from the queue.
-fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue<P>, id: &HeaderIdOf<P>) -> Option<&'a QueuedHeader<P>> {
+fn header<'a, P: HeadersSyncPipeline>(
+	queue: &'a HeadersQueue<P>,
+	id: &HeaderIdOf<P>,
+) -> Option<&'a QueuedHeader<P>> {
 	queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1))
 }
 
@@ -799,11 +833,7 @@ fn oldest_headers<P: HeadersSyncPipeline>(
 	queue: &HeadersQueue<P>,
 	mut f: impl FnMut(&QueuedHeader<P>) -> bool,
 ) -> Option<Vec<&QueuedHeader<P>>> {
-	let result = queue
-		.values()
-		.flat_map(|h| h.values())
-		.take_while(|h| f(h))
-		.collect::<Vec<_>>();
+	let result = queue.values().flat_map(|h| h.values()).take_while(|h| f(h)).collect::<Vec<_>>();
 	if result.is_empty() {
 		None
 	} else {
@@ -817,7 +847,10 @@ fn prune_queue<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, prune_border
 }
 
 /// Forget all known headers with number less than given.
-fn prune_known_headers<P: HeadersSyncPipeline>(known_headers: &mut KnownHeaders<P>, prune_border: P::Number) {
+fn prune_known_headers<P: HeadersSyncPipeline>(
+	known_headers: &mut KnownHeaders<P>,
+	prune_border: P::Number,
+) {
 	let new_known_headers = known_headers.split_off(&prune_border);
 	for (pruned_number, pruned_headers) in &*known_headers {
 		for pruned_hash in pruned_headers.keys() {
@@ -848,8 +881,8 @@ fn queued_incomplete_header<Id: Clone + Eq + std::hash::Hash, T>(
 	map: &mut LinkedHashMap<Id, T>,
 	filter: impl FnMut(&mut T) -> bool,
 ) -> Option<(Id, &T)> {
-	// TODO (#84): headers that have been just appended to the end of the queue would have to wait until
-	// all previous headers will be retried
+	// TODO (#84): headers that have been just appended to the end of the queue would have to wait
+	// until all previous headers will be retried
 
 	let retry_old_header = map
 		.front()
@@ -857,9 +890,10 @@ fn queued_incomplete_header<Id: Clone + Eq + std::hash::Hash, T>(
 		.and_then(|key| map.get_mut(&key).map(filter))
 		.unwrap_or(false);
 	if retry_old_header {
-		let (header_key, header) = map.pop_front().expect("we have checked that front() exists; qed");
+		let (header_key, header) =
+			map.pop_front().expect("we have checked that front() exists; qed");
 		map.insert(header_key, header);
-		return map.back().map(|(id, data)| (id.clone(), data));
+		return map.back().map(|(id, data)| (id.clone(), data))
 	}
 
 	None
@@ -868,15 +902,15 @@ fn queued_incomplete_header<Id: Clone + Eq + std::hash::Hash, T>(
 #[cfg(test)]
 pub(crate) mod tests {
 	use super::*;
-	use crate::sync_loop_tests::{TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber};
-	use crate::sync_types::QueuedHeader;
+	use crate::{
+		sync_loop_tests::{
+			TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber,
+		},
+		sync_types::QueuedHeader,
+	};
 
 	pub(crate) fn header(number: TestNumber) -> QueuedHeader<TestHeadersSyncPipeline> {
-		QueuedHeader::new(TestHeader {
-			number,
-			hash: hash(number),
-			parent_hash: hash(number - 1),
-		})
+		QueuedHeader::new(TestHeader { number, hash: hash(number), parent_hash: hash(number - 1) })
 	}
 
 	pub(crate) fn hash(number: TestNumber) -> TestHash {
@@ -891,34 +925,41 @@ pub(crate) mod tests {
 	fn total_headers_works() {
 		// total headers just sums up number of headers in every queue
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(1),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(2),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(2).or_default().insert(
-			hash(3),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.orphan.entry(3).or_default().insert(
-			hash(4),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_extra.entry(4).or_default().insert(
-			hash(5),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.ready.entry(5).or_default().insert(
-			hash(6),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.incomplete.entry(6).or_default().insert(
-			hash(7),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(1), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(2), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(2)
+			.or_default()
+			.insert(hash(3), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.orphan
+			.entry(3)
+			.or_default()
+			.insert(hash(4), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_extra
+			.entry(4)
+			.or_default()
+			.insert(hash(5), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.ready
+			.entry(5)
+			.or_default()
+			.insert(hash(6), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.incomplete
+			.entry(6)
+			.or_default()
+			.insert(hash(7), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.total_headers(), 7);
 	}
 
@@ -926,48 +967,56 @@ pub(crate) mod tests {
 	fn best_queued_number_works() {
 		// initially there are headers in MaybeOrphan queue only
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(1),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(2),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(3).or_default().insert(
-			hash(3),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(1), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(2), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(3)
+			.or_default()
+			.insert(hash(3), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 3);
 		// and then there's better header in Orphan
-		queue.orphan.entry(10).or_default().insert(
-			hash(10),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.orphan
+			.entry(10)
+			.or_default()
+			.insert(hash(10), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 10);
 		// and then there's better header in MaybeExtra
-		queue.maybe_extra.entry(20).or_default().insert(
-			hash(20),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_extra
+			.entry(20)
+			.or_default()
+			.insert(hash(20), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 20);
 		// and then there's better header in Ready
-		queue.ready.entry(30).or_default().insert(
-			hash(30),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.ready
+			.entry(30)
+			.or_default()
+			.insert(hash(30), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 30);
 		// and then there's better header in MaybeOrphan again
-		queue.maybe_orphan.entry(40).or_default().insert(
-			hash(40),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_orphan
+			.entry(40)
+			.or_default()
+			.insert(hash(40), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 40);
 		// and then there's some header in Incomplete
-		queue.incomplete.entry(50).or_default().insert(
-			hash(50),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.incomplete
+			.entry(50)
+			.or_default()
+			.insert(hash(50), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 50);
 	}
 
@@ -977,11 +1026,7 @@ pub(crate) mod tests {
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
 		assert_eq!(queue.status(&id(10)), HeaderStatus::Unknown);
 		// and status is read from the KnownHeaders
-		queue
-			.known_headers
-			.entry(10)
-			.or_default()
-			.insert(hash(10), HeaderStatus::Ready);
+		queue.known_headers.entry(10).or_default().insert(hash(10), HeaderStatus::Ready);
 		assert_eq!(queue.status(&id(10)), HeaderStatus::Ready);
 	}
 
@@ -990,22 +1035,13 @@ pub(crate) mod tests {
 		// initially we have oldest header #10
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
 		queue.maybe_orphan.entry(10).or_default().insert(hash(1), header(100));
-		assert_eq!(
-			queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash,
-			hash(100)
-		);
+		assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(100));
 		// inserting #20 changes nothing
 		queue.maybe_orphan.entry(20).or_default().insert(hash(1), header(101));
-		assert_eq!(
-			queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash,
-			hash(100)
-		);
+		assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(100));
 		// inserting #5 makes it oldest
 		queue.maybe_orphan.entry(5).or_default().insert(hash(1), header(102));
-		assert_eq!(
-			queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash,
-			hash(102)
-		);
+		assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(102));
 	}
 
 	#[test]
@@ -1091,11 +1127,7 @@ pub(crate) mod tests {
 			.entry(100)
 			.or_default()
 			.insert(hash(100), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(100)
-			.or_default()
-			.insert(hash(100), header(100));
+		queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100));
 		queue
 			.known_headers
 			.entry(99)
@@ -1108,17 +1140,9 @@ pub(crate) mod tests {
 			.or_default()
 			.insert(hash(98), HeaderStatus::MaybeExtra);
 		queue.maybe_extra.entry(98).or_default().insert(hash(98), header(98));
-		queue
-			.known_headers
-			.entry(97)
-			.or_default()
-			.insert(hash(97), HeaderStatus::Extra);
+		queue.known_headers.entry(97).or_default().insert(hash(97), HeaderStatus::Extra);
 		queue.extra.entry(97).or_default().insert(hash(97), header(97));
-		queue
-			.known_headers
-			.entry(96)
-			.or_default()
-			.insert(hash(96), HeaderStatus::Ready);
+		queue.known_headers.entry(96).or_default().insert(hash(96), HeaderStatus::Ready);
 		queue.ready.entry(96).or_default().insert(hash(96), header(96));
 		queue.target_best_header_response(&id(100));
 
@@ -1137,31 +1161,19 @@ pub(crate) mod tests {
 		// children of synced headers are stored
 		assert_eq!(
 			vec![id(97)],
-			queue.synced_children[&96][&hash(96)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&96][&hash(96)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(
 			vec![id(98)],
-			queue.synced_children[&97][&hash(97)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&97][&hash(97)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(
 			vec![id(99)],
-			queue.synced_children[&98][&hash(98)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&98][&hash(98)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(
 			vec![id(100)],
-			queue.synced_children[&99][&hash(99)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&99][&hash(99)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(0, queue.synced_children[&100][&hash(100)].len());
 	}
@@ -1185,11 +1197,7 @@ pub(crate) mod tests {
 			.entry(102)
 			.or_default()
 			.insert(hash(102), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(102)
-			.or_default()
-			.insert(hash(102), header(102));
+		queue.maybe_orphan.entry(102).or_default().insert(hash(102), header(102));
 		queue
 			.known_headers
 			.entry(103)
@@ -1221,11 +1229,7 @@ pub(crate) mod tests {
 			.entry(100)
 			.or_default()
 			.insert(hash(100), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(100)
-			.or_default()
-			.insert(hash(100), header(100));
+		queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100));
 		queue
 			.known_headers
 			.entry(101)
@@ -1237,11 +1241,7 @@ pub(crate) mod tests {
 			.entry(102)
 			.or_default()
 			.insert(hash(102), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(102)
-			.or_default()
-			.insert(hash(102), header(102));
+		queue.maybe_orphan.entry(102).or_default().insert(hash(102), header(102));
 		queue.maybe_orphan_response(&id(99), true);
 
 		// then all headers (#100..#103) are moved to the MaybeExtra queue
@@ -1266,21 +1266,13 @@ pub(crate) mod tests {
 			.entry(100)
 			.or_default()
 			.insert(hash(100), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(100)
-			.or_default()
-			.insert(hash(100), header(100));
+		queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100));
 		queue
 			.known_headers
 			.entry(101)
 			.or_default()
 			.insert(hash(101), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(101)
-			.or_default()
-			.insert(hash(101), header(101));
+		queue.maybe_orphan.entry(101).or_default().insert(hash(101), header(101));
 		queue.maybe_orphan_response(&id(99), false);
 
 		// then all headers (#100..#101) are moved to the Orphan queue
@@ -1395,7 +1387,9 @@ pub(crate) mod tests {
 		queue.incomplete_headers.clear();
 		queue.incomplete_headers.insert(
 			id(100),
-			Some(Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL),
+			Some(
+				Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL,
+			),
 		);
 		assert_eq!(queue.incomplete_header(), Some(id(100)));
 	}
@@ -1551,11 +1545,7 @@ pub(crate) mod tests {
 			.entry(104)
 			.or_default()
 			.insert(hash(104), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(104)
-			.or_default()
-			.insert(hash(104), header(104));
+		queue.maybe_orphan.entry(104).or_default().insert(hash(104), header(104));
 		queue
 			.known_headers
 			.entry(103)
@@ -1624,7 +1614,8 @@ pub(crate) mod tests {
 	fn incomplete_headers_are_still_incomplete_after_advance() {
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
 
-		// relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete queue
+		// relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete
+		// queue
 		queue.incomplete_headers.insert(id(100), None);
 		queue.incomplete.entry(101).or_default().insert(hash(101), header(101));
 		queue.incomplete.entry(102).or_default().insert(hash(102), header(102));
@@ -1656,8 +1647,8 @@ pub(crate) mod tests {
 			.or_default()
 			.insert(hash(104), HeaderStatus::Incomplete);
 
-		// let's say relay#2 completes header#100 and then submits header#101+header#102 and it turns
-		// out that header#102 is also incomplete
+		// let's say relay#2 completes header#100 and then submits header#101+header#102 and it
+		// turns out that header#102 is also incomplete
 		queue.incomplete_headers_response(vec![id(102)].into_iter().collect());
 
 		// then the header#103 and the header#104 must have Incomplete status
diff --git a/bridges/relays/headers/src/sync.rs b/bridges/relays/headers/src/sync.rs
index 7e3d9020290f4a6470baa7f366a9bf03117bad0e..012b63f0dc59c355bc06b9e01833f61d8f9e85a4 100644
--- a/bridges/relays/headers/src/sync.rs
+++ b/bridges/relays/headers/src/sync.rs
@@ -19,8 +19,10 @@
 //! to submit to the target chain? The context makes decisions basing on parameters
 //! passed using `HeadersSyncParams` structure.
 
-use crate::headers::QueuedHeaders;
-use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader};
+use crate::{
+	headers::QueuedHeaders,
+	sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader},
+};
 use num_traits::{One, Saturating, Zero};
 
 /// Common sync params.
@@ -121,20 +123,21 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		// if we haven't received best header from source node yet, there's nothing we can download
 		let source_best_number = self.source_best_number?;
 
-		// if we haven't received known best header from target node yet, there's nothing we can download
+		// if we haven't received known best header from target node yet, there's nothing we can
+		// download
 		let target_best_header = self.target_best_header.as_ref()?;
 
 		// if there's too many headers in the queue, stop downloading
 		let in_memory_headers = self.headers.total_headers();
 		if in_memory_headers >= self.params.max_future_headers_to_download {
-			return None;
+			return None
 		}
 
 		// if queue is empty and best header on target is > than best header on source,
 		// then we shoud reorganization
 		let best_queued_number = self.headers.best_queued_number();
 		if best_queued_number.is_zero() && source_best_number < target_best_header.0 {
-			return Some(source_best_number);
+			return Some(source_best_number)
 		}
 
 		// we assume that there were no reorganizations if we have already downloaded best header
@@ -143,14 +146,14 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 			target_best_header.0,
 		);
 		if best_downloaded_number >= source_best_number {
-			return None;
+			return None
 		}
 
 		// download new header
 		Some(best_downloaded_number + One::one())
 	}
 
-	/// Selech orphan header to download.
+	/// Select orphan header to download.
 	pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader<P>> {
 		let orphan_header = self.headers.header(HeaderStatus::Orphan)?;
 
@@ -159,7 +162,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		// => let's avoid fetching duplicate headers
 		let parent_id = orphan_header.parent_id();
 		if self.headers.status(&parent_id) != HeaderStatus::Unknown {
-			return None;
+			return None
 		}
 
 		Some(orphan_header)
@@ -169,12 +172,12 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 	pub fn select_headers_to_submit(&self, stalled: bool) -> Option<Vec<&QueuedHeader<P>>> {
 		// maybe we have paused new headers submit?
 		if self.pause_submit {
-			return None;
+			return None
 		}
 
 		// if we operate in backup mode, we only submit headers when sync has stalled
 		if self.params.target_tx_mode == TargetTransactionMode::Backup && !stalled {
-			return None;
+			return None
 		}
 
 		let headers_in_submit_status = self.headers.headers_in_status(HeaderStatus::Submitted);
@@ -187,15 +190,17 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		let mut total_headers = 0;
 		self.headers.headers(HeaderStatus::Ready, |header| {
 			if total_headers == headers_to_submit_count {
-				return false;
+				return false
 			}
 			if total_headers == self.params.max_headers_in_single_submit {
-				return false;
+				return false
 			}
 
 			let encoded_size = P::estimate_size(header);
-			if total_headers != 0 && total_size + encoded_size > self.params.max_headers_size_in_single_submit {
-				return false;
+			if total_headers != 0 &&
+				total_size + encoded_size > self.params.max_headers_size_in_single_submit
+			{
+				return false
 			}
 
 			total_size += encoded_size;
@@ -228,15 +233,14 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 
 		// early return if it is still the same
 		if self.target_best_header == Some(best_header) {
-			return false;
+			return false
 		}
 
 		// remember that this header is now known to the Substrate runtime
 		self.headers.target_best_header_response(&best_header);
 
 		// prune ancient headers
-		self.headers
-			.prune(best_header.0.saturating_sub(self.params.prune_depth.into()));
+		self.headers.prune(best_header.0.saturating_sub(self.params.prune_depth.into()));
 
 		// finally remember the best header itself
 		self.target_best_header = Some(best_header);
@@ -281,9 +285,11 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 #[cfg(test)]
 pub mod tests {
 	use super::*;
-	use crate::headers::tests::{header, id};
-	use crate::sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber};
-	use crate::sync_types::HeaderStatus;
+	use crate::{
+		headers::tests::{header, id},
+		sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber},
+		sync_types::HeaderStatus,
+	};
 	use relay_utils::HeaderId;
 
 	fn side_hash(number: TestNumber) -> TestHash {
diff --git a/bridges/relays/headers/src/sync_loop.rs b/bridges/relays/headers/src/sync_loop.rs
index a557eca6a2306b275c5b5993bc8a276a57c9f5ee..d54a445d7f0e57e8d68aa5629a60e05af86e760c 100644
--- a/bridges/relays/headers/src/sync_loop.rs
+++ b/bridges/relays/headers/src/sync_loop.rs
@@ -16,9 +16,11 @@
 
 //! Entrypoint for running headers synchronization loop.
 
-use crate::sync::{HeadersSync, HeadersSyncParams};
-use crate::sync_loop_metrics::SyncLoopMetrics;
-use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders};
+use crate::{
+	sync::{HeadersSync, HeadersSyncParams},
+	sync_loop_metrics::SyncLoopMetrics,
+	sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders},
+};
 
 use async_trait::async_trait;
 use futures::{future::FutureExt, stream::StreamExt};
@@ -66,8 +68,10 @@ pub trait SourceClient<P: HeadersSyncPipeline>: RelayClient {
 	async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Self::Error>;
 
 	/// Get completion data by header hash.
-	async fn header_completion(&self, id: HeaderIdOf<P>)
-		-> Result<(HeaderIdOf<P>, Option<P::Completion>), Self::Error>;
+	async fn header_completion(
+		&self,
+		id: HeaderIdOf<P>,
+	) -> Result<(HeaderIdOf<P>, Option<P::Completion>), Self::Error>;
 
 	/// Get extra data by header hash.
 	async fn header_extra(
@@ -84,20 +88,32 @@ pub trait TargetClient<P: HeadersSyncPipeline>: RelayClient {
 	async fn best_header_id(&self) -> Result<HeaderIdOf<P>, Self::Error>;
 
 	/// Returns true if header is known to the target node.
-	async fn is_known_header(&self, id: HeaderIdOf<P>) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
+	async fn is_known_header(
+		&self,
+		id: HeaderIdOf<P>,
+	) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
 
 	/// Submit headers.
-	async fn submit_headers(&self, headers: Vec<QueuedHeader<P>>) -> SubmittedHeaders<HeaderIdOf<P>, Self::Error>;
+	async fn submit_headers(
+		&self,
+		headers: Vec<QueuedHeader<P>>,
+	) -> SubmittedHeaders<HeaderIdOf<P>, Self::Error>;
 
 	/// Returns ID of headers that require to be 'completed' before children can be submitted.
 	async fn incomplete_headers_ids(&self) -> Result<HashSet<HeaderIdOf<P>>, Self::Error>;
 
 	/// Submit completion data for header.
-	async fn complete_header(&self, id: HeaderIdOf<P>, completion: P::Completion)
-		-> Result<HeaderIdOf<P>, Self::Error>;
+	async fn complete_header(
+		&self,
+		id: HeaderIdOf<P>,
+		completion: P::Completion,
+	) -> Result<HeaderIdOf<P>, Self::Error>;
 
 	/// Returns true if header requires extra data to be submitted.
-	async fn requires_extra(&self, header: QueuedHeader<P>) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
+	async fn requires_extra(
+		&self,
+		header: QueuedHeader<P>,
+	) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
 }
 
 /// Synchronization maintain procedure.
@@ -110,7 +126,8 @@ pub trait SyncMaintain<P: HeadersSyncPipeline>: 'static + Clone + Send + Sync {
 
 impl<P: HeadersSyncPipeline> SyncMaintain<P> for () {}
 
-/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop.
+/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
+/// sync loop.
 pub fn metrics_prefix<P: HeadersSyncPipeline>() -> String {
 	format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME)
 }
@@ -480,7 +497,8 @@ async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 					id,
 				);
 
-				target_complete_header_future.set(target_client.complete_header(id, completion.clone()).fuse());
+				target_complete_header_future
+					.set(target_client.complete_header(id, completion.clone()).fuse());
 			} else if let Some(header) = sync.headers().header(HeaderStatus::MaybeExtra) {
 				log::debug!(
 					target: "bridge",
@@ -501,8 +519,8 @@ async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 				);
 
 				target_existence_status_future.set(target_client.is_known_header(parent_id).fuse());
-			} else if let Some(headers) =
-				sync.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT)
+			} else if let Some(headers) = sync
+				.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT)
 			{
 				log::debug!(
 					target: "bridge",
@@ -580,7 +598,7 @@ async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 						P::SOURCE_NAME,
 						P::TARGET_NAME,
 					);
-					return Ok(());
+					return Ok(())
 				}
 
 				log::debug!(
@@ -616,15 +634,14 @@ fn print_sync_progress<P: HeadersSyncPipeline>(
 	let now_time = Instant::now();
 	let (now_best_header, now_target_header) = eth_sync.status();
 
-	let need_update = now_time - prev_time > Duration::from_secs(10)
-		|| match (prev_best_header, now_best_header) {
-			(Some(prev_best_header), Some(now_best_header)) => {
-				now_best_header.0.saturating_sub(prev_best_header) > 10.into()
-			}
+	let need_update = now_time - prev_time > Duration::from_secs(10) ||
+		match (prev_best_header, now_best_header) {
+			(Some(prev_best_header), Some(now_best_header)) =>
+				now_best_header.0.saturating_sub(prev_best_header) > 10.into(),
 			_ => false,
 		};
 	if !need_update {
-		return (prev_time, prev_best_header, prev_target_header);
+		return (prev_time, prev_best_header, prev_target_header)
 	}
 
 	log::info!(
diff --git a/bridges/relays/headers/src/sync_loop_metrics.rs b/bridges/relays/headers/src/sync_loop_metrics.rs
index 37dae1134042890420f43fc19dc8d7ca016e58b2..1c558c25de9d5e06a10af707201c80316c41c8c2 100644
--- a/bridges/relays/headers/src/sync_loop_metrics.rs
+++ b/bridges/relays/headers/src/sync_loop_metrics.rs
@@ -16,8 +16,10 @@
 
 //! Metrics for headers synchronization relay loop.
 
-use crate::sync::HeadersSync;
-use crate::sync_types::{HeaderStatus, HeadersSyncPipeline};
+use crate::{
+	sync::HeadersSync,
+	sync_types::{HeaderStatus, HeadersSyncPipeline},
+};
 
 use num_traits::Zero;
 use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64};
@@ -78,7 +80,8 @@ impl SyncLoopMetrics {
 	pub fn update<P: HeadersSyncPipeline>(&self, sync: &HeadersSync<P>) {
 		let headers = sync.headers();
 		let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero);
-		let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero);
+		let target_best_number =
+			sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero);
 
 		self.update_best_block_at_source(source_best_number);
 		self.update_best_block_at_target(target_best_number);
diff --git a/bridges/relays/headers/src/sync_loop_tests.rs b/bridges/relays/headers/src/sync_loop_tests.rs
index 3347c4d0d3bd046718926448a91d4cae5465fe02..f100998ca83f97443b9c21d29cab2ab6c21cb7a1 100644
--- a/bridges/relays/headers/src/sync_loop_tests.rs
+++ b/bridges/relays/headers/src/sync_loop_tests.rs
@@ -16,16 +16,18 @@
 
 #![cfg(test)]
 
-use crate::sync_loop::{run, SourceClient, TargetClient};
-use crate::sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders};
+use crate::{
+	sync_loop::{run, SourceClient, TargetClient},
+	sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders},
+};
 
 use async_trait::async_trait;
 use backoff::backoff::Backoff;
 use futures::{future::FutureExt, stream::StreamExt};
 use parking_lot::Mutex;
 use relay_utils::{
-	metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId,
-	MaybeConnectionError,
+	metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient,
+	retry_backoff, HeaderId, MaybeConnectionError,
 };
 use std::{
 	collections::{HashMap, HashSet},
@@ -166,7 +168,10 @@ impl SourceClient<TestHeadersSyncPipeline> for Source {
 		data.header_by_number.get(&number).cloned().ok_or(TestError(false))
 	}
 
-	async fn header_completion(&self, id: TestHeaderId) -> Result<(TestHeaderId, Option<TestCompletion>), TestError> {
+	async fn header_completion(
+		&self,
+		id: TestHeaderId,
+	) -> Result<(TestHeaderId, Option<TestCompletion>), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(SourceMethod::HeaderCompletion(id), &mut *data);
 		if data.provides_completion {
@@ -264,7 +269,10 @@ impl TargetClient<TestHeadersSyncPipeline> for Target {
 			.unwrap_or(Ok((id, false)))
 	}
 
-	async fn submit_headers(&self, headers: Vec<TestQueuedHeader>) -> SubmittedHeaders<TestHeaderId, TestError> {
+	async fn submit_headers(
+		&self,
+		headers: Vec<TestQueuedHeader>,
+	) -> SubmittedHeaders<TestHeaderId, TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(TargetMethod::SubmitHeaders(headers.clone()), &mut *data);
 		data.submitted_headers
@@ -287,14 +295,21 @@ impl TargetClient<TestHeadersSyncPipeline> for Target {
 		}
 	}
 
-	async fn complete_header(&self, id: TestHeaderId, completion: TestCompletion) -> Result<TestHeaderId, TestError> {
+	async fn complete_header(
+		&self,
+		id: TestHeaderId,
+		completion: TestCompletion,
+	) -> Result<TestHeaderId, TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(TargetMethod::CompleteHeader(id, completion), &mut *data);
 		data.completed_headers.insert(id.1, completion);
 		Ok(id)
 	}
 
-	async fn requires_extra(&self, header: TestQueuedHeader) -> Result<(TestHeaderId, bool), TestError> {
+	async fn requires_extra(
+		&self,
+		header: TestQueuedHeader,
+	) -> Result<(TestHeaderId, bool), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(TargetMethod::RequiresExtra(header.clone()), &mut *data);
 		if data.requires_extra {
@@ -321,11 +336,7 @@ fn test_header(number: TestNumber) -> TestHeader {
 	TestHeader {
 		hash: id.1,
 		number: id.0,
-		parent_hash: if number == 0 {
-			TestHash::default()
-		} else {
-			test_id(number - 1).1
-		},
+		parent_hash: if number == 0 { TestHash::default() } else { test_id(number - 1).1 },
 	}
 }
 
@@ -467,18 +478,15 @@ fn run_sync_loop_test(params: SyncLoopTestParams) {
 	let target_requires_extra = params.target_requires_extra;
 	let target_requires_completion = params.target_requires_completion;
 	let stop_at = params.stop_at;
-	let source = Source::new(
-		params.best_source_header.id(),
-		params.headers_on_source,
-		move |method, _| {
+	let source =
+		Source::new(params.best_source_header.id(), params.headers_on_source, move |method, _| {
 			if !target_requires_extra {
 				source_reject_extra(&method);
 			}
 			if !target_requires_completion {
 				source_reject_completion(&method);
 			}
-		},
-	);
+		});
 	let target = Target::new(
 		params.best_target_header.id(),
 		params.headers_on_target.into_iter().map(|header| header.id()).collect(),
diff --git a/bridges/relays/headers/src/sync_types.rs b/bridges/relays/headers/src/sync_types.rs
index 05ed25114ed0899916250234f5d76618e016d60a..8d93e8bf49fbea15e8c1305b2d0ce3a7a7bbe092 100644
--- a/bridges/relays/headers/src/sync_types.rs
+++ b/bridges/relays/headers/src/sync_types.rs
@@ -50,7 +50,14 @@ pub trait HeadersSyncPipeline: 'static + Clone + Send + Sync {
 	const TARGET_NAME: &'static str;
 
 	/// Headers we're syncing are identified by this hash.
-	type Hash: Eq + Clone + Copy + Send + Sync + std::fmt::Debug + std::fmt::Display + std::hash::Hash;
+	type Hash: Eq
+		+ Clone
+		+ Copy
+		+ Send
+		+ Sync
+		+ std::fmt::Debug
+		+ std::fmt::Display
+		+ std::hash::Hash;
 	/// Headers we're syncing are identified by this number.
 	type Number: relay_utils::BlockNumberBase;
 	/// Type of header that we're syncing.
@@ -77,7 +84,8 @@ pub trait HeadersSyncPipeline: 'static + Clone + Send + Sync {
 }
 
 /// A HeaderId for `HeaderSyncPipeline`.
-pub type HeaderIdOf<P> = HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>::Number>;
+pub type HeaderIdOf<P> =
+	HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>::Number>;
 
 /// Header that we're receiving from source node.
 pub trait SourceHeader<Hash, Number>: Clone + std::fmt::Debug + PartialEq + Send + Sync {
@@ -153,8 +161,8 @@ impl<P: HeadersSyncPipeline> QueuedHeader<P> {
 pub struct SubmittedHeaders<Id, Error> {
 	/// IDs of headers that have been submitted to target node.
 	pub submitted: Vec<Id>,
-	/// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted` vec),
-	/// but all descendants are not.
+	/// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted`
+	/// vec), but all descendants are not.
 	pub incomplete: Vec<Id>,
 	/// IDs of ignored headers that we have decided not to submit (they are either rejected by
 	/// target node immediately, or their descendants of incomplete headers).
@@ -180,10 +188,6 @@ impl<Id: std::fmt::Debug, Error> std::fmt::Display for SubmittedHeaders<Id, Erro
 		let incomplete = format_ids(self.incomplete.iter());
 		let rejected = format_ids(self.rejected.iter());
 
-		write!(
-			f,
-			"Submitted: {}, Incomplete: {}, Rejected: {}",
-			submitted, incomplete, rejected
-		)
+		write!(f, "Submitted: {}, Incomplete: {}, Rejected: {}", submitted, incomplete, rejected)
 	}
 }
diff --git a/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs b/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs
index 3e32fe32458eee51c4ee65e435a89799bca0fdde..93458457d34c9dc4213aa71817d8cb6f73ef6a76 100644
--- a/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs
+++ b/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs
@@ -33,7 +33,8 @@ enum TransactionStatus {
 
 /// Run infinite conversion rate updater loop.
 ///
-/// The loop is maintaining the Left -> Right conversion rate, used as `RightTokens = LeftTokens * Rate`.
+/// The loop is maintaining the Left -> Right conversion rate, used as `RightTokens = LeftTokens *
+/// Rate`.
 pub fn run_conversion_rate_update_loop<
 	SubmitConversionRateFuture: Future<Output = anyhow::Result<()>> + Send + 'static,
 >(
@@ -60,10 +61,10 @@ pub fn run_conversion_rate_update_loop<
 				match submit_conversion_rate_future.await {
 					Ok(()) => {
 						transaction_status = TransactionStatus::Submitted(prev_conversion_rate);
-					}
+					},
 					Err(error) => {
 						log::trace!(target: "bridge", "Failed to submit conversion rate update transaction: {:?}", error);
-					}
+					},
 				}
 			}
 		}
@@ -78,41 +79,43 @@ async fn maybe_select_new_conversion_rate(
 	right_to_base_conversion_rate: &F64SharedRef,
 	max_difference_ratio: f64,
 ) -> Option<(f64, f64)> {
-	let left_to_right_stored_conversion_rate = (*left_to_right_stored_conversion_rate.read().await)?;
+	let left_to_right_stored_conversion_rate =
+		(*left_to_right_stored_conversion_rate.read().await)?;
 	match *transaction_status {
 		TransactionStatus::Idle => (),
 		TransactionStatus::Submitted(previous_left_to_right_stored_conversion_rate) => {
-			// we can't compare float values from different sources directly, so we only care whether the
-			// stored rate has been changed or not. If it has been changed, then we assume that our proposal
-			// has been accepted.
+			// we can't compare float values from different sources directly, so we only care
+			// whether the stored rate has been changed or not. If it has been changed, then we
+			// assume that our proposal has been accepted.
 			//
-			// float comparison is ok here, because we compare same-origin (stored in runtime storage) values
-			// and if they are different, it means that the value has actually been updated
+			// float comparison is ok here, because we compare same-origin (stored in runtime
+			// storage) values and if they are different, it means that the value has actually been
+			// updated
 			#[allow(clippy::float_cmp)]
-			if previous_left_to_right_stored_conversion_rate == left_to_right_stored_conversion_rate {
-				// the rate has not been changed => we won't submit any transactions until it is accepted,
-				// or the rate is changed by someone else
-				return None;
+			if previous_left_to_right_stored_conversion_rate == left_to_right_stored_conversion_rate
+			{
+				// the rate has not been changed => we won't submit any transactions until it is
+				// accepted, or the rate is changed by someone else
+				return None
 			}
 
 			*transaction_status = TransactionStatus::Idle;
-		}
+		},
 	}
 
 	let left_to_base_conversion_rate = (*left_to_base_conversion_rate.read().await)?;
 	let right_to_base_conversion_rate = (*right_to_base_conversion_rate.read().await)?;
-	let actual_left_to_right_conversion_rate = right_to_base_conversion_rate / left_to_base_conversion_rate;
+	let actual_left_to_right_conversion_rate =
+		right_to_base_conversion_rate / left_to_base_conversion_rate;
 
-	let rate_difference = (actual_left_to_right_conversion_rate - left_to_right_stored_conversion_rate).abs();
+	let rate_difference =
+		(actual_left_to_right_conversion_rate - left_to_right_stored_conversion_rate).abs();
 	let rate_difference_ratio = rate_difference / left_to_right_stored_conversion_rate;
 	if rate_difference_ratio < max_difference_ratio {
-		return None;
+		return None
 	}
 
-	Some((
-		left_to_right_stored_conversion_rate,
-		actual_left_to_right_conversion_rate,
-	))
+	Some((left_to_right_stored_conversion_rate, actual_left_to_right_conversion_rate))
 }
 
 #[cfg(test)]
@@ -171,7 +174,13 @@ mod tests {
 	#[test]
 	fn transaction_is_not_submitted_when_left_to_base_rate_is_unknown() {
 		assert_eq!(
-			test_maybe_select_new_conversion_rate(TransactionStatus::Idle, Some(10.0), None, Some(1.0), 0.0),
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(10.0),
+				None,
+				Some(1.0),
+				0.0
+			),
 			(None, TransactionStatus::Idle),
 		);
 	}
@@ -179,7 +188,13 @@ mod tests {
 	#[test]
 	fn transaction_is_not_submitted_when_right_to_base_rate_is_unknown() {
 		assert_eq!(
-			test_maybe_select_new_conversion_rate(TransactionStatus::Idle, Some(10.0), Some(1.0), None, 0.0),
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(10.0),
+				Some(1.0),
+				None,
+				0.0
+			),
 			(None, TransactionStatus::Idle),
 		);
 	}
@@ -187,7 +202,13 @@ mod tests {
 	#[test]
 	fn transaction_is_not_submitted_when_stored_rate_is_unknown() {
 		assert_eq!(
-			test_maybe_select_new_conversion_rate(TransactionStatus::Idle, None, Some(1.0), Some(1.0), 0.0),
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				None,
+				Some(1.0),
+				Some(1.0),
+				0.0
+			),
 			(None, TransactionStatus::Idle),
 		);
 	}
@@ -195,7 +216,13 @@ mod tests {
 	#[test]
 	fn transaction_is_not_submitted_when_difference_is_below_threshold() {
 		assert_eq!(
-			test_maybe_select_new_conversion_rate(TransactionStatus::Idle, Some(1.0), Some(1.0), Some(1.01), 0.02),
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(1.0),
+				Some(1.0),
+				Some(1.01),
+				0.02
+			),
 			(None, TransactionStatus::Idle),
 		);
 	}
@@ -203,7 +230,13 @@ mod tests {
 	#[test]
 	fn transaction_is_submitted_when_difference_is_above_threshold() {
 		assert_eq!(
-			test_maybe_select_new_conversion_rate(TransactionStatus::Idle, Some(1.0), Some(1.0), Some(1.03), 0.02),
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(1.0),
+				Some(1.0),
+				Some(1.03),
+				0.02
+			),
 			(Some((1.0, 1.03)), TransactionStatus::Idle),
 		);
 	}
diff --git a/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs b/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
index 8595ab2842b3d4b61f051c1ef1ef918eb63d56ee..cca9e5196b3caca2cdaa53290c8df8f38c726f35 100644
--- a/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
+++ b/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
@@ -21,7 +21,9 @@ use crate::finality_target::SubstrateFinalityTarget;
 use bp_header_chain::justification::GrandpaJustification;
 use bp_runtime::AccountIdOf;
 use finality_relay::{FinalitySyncParams, FinalitySyncPipeline};
-use relay_substrate_client::{finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader};
+use relay_substrate_client::{
+	finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader,
+};
 use relay_utils::{metrics::MetricsParams, BlockNumberBase};
 use sp_core::Bytes;
 use std::{fmt::Debug, marker::PhantomData, time::Duration};
@@ -97,14 +99,12 @@ impl<SourceChain, TargetChain: Chain, TargetSign> Debug
 	}
 }
 
-impl<SourceChain, TargetChain: Chain, TargetSign> SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign> {
+impl<SourceChain, TargetChain: Chain, TargetSign>
+	SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>
+{
 	/// Create new Substrate-to-Substrate headers pipeline.
 	pub fn new(target_client: Client<TargetChain>, target_sign: TargetSign) -> Self {
-		SubstrateFinalityToSubstrate {
-			target_client,
-			target_sign,
-			_marker: Default::default(),
-		}
+		SubstrateFinalityToSubstrate { target_client, target_sign, _marker: Default::default() }
 	}
 }
 
@@ -157,7 +157,10 @@ where
 		FinalitySource::new(source_client, None),
 		SubstrateFinalityTarget::new(target_client, pipeline, transactions_mortality),
 		FinalitySyncParams {
-			tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL),
+			tick: std::cmp::max(
+				SourceChain::AVERAGE_BLOCK_INTERVAL,
+				TargetChain::AVERAGE_BLOCK_INTERVAL,
+			),
 			recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT,
 			stall_timeout: relay_substrate_client::transaction_stall_timeout(
 				transactions_mortality,
diff --git a/bridges/relays/lib-substrate-relay/src/finality_target.rs b/bridges/relays/lib-substrate-relay/src/finality_target.rs
index 5db7d080ab1b829be386c9c58360c11795aff7a5..1353eec87278ac945f2d7d68dbf4dc46a2614dfc 100644
--- a/bridges/relays/lib-substrate-relay/src/finality_target.rs
+++ b/bridges/relays/lib-substrate-relay/src/finality_target.rs
@@ -36,11 +36,7 @@ pub struct SubstrateFinalityTarget<C: Chain, P> {
 impl<C: Chain, P> SubstrateFinalityTarget<C, P> {
 	/// Create new Substrate headers target.
 	pub fn new(client: Client<C>, pipeline: P, transactions_mortality: Option<u32>) -> Self {
-		SubstrateFinalityTarget {
-			client,
-			pipeline,
-			transactions_mortality,
-		}
+		SubstrateFinalityTarget { client, pipeline, transactions_mortality }
 	}
 }
 
@@ -97,18 +93,21 @@ where
 		let pipeline = self.pipeline.clone();
 		let transactions_mortality = self.transactions_mortality;
 		self.client
-			.submit_signed_extrinsic(transactions_author, move |best_block_id, transaction_nonce| {
-				pipeline.make_submit_finality_proof_transaction(
-					relay_substrate_client::TransactionEra::new(
-						best_block_id.0,
-						best_block_id.1,
-						transactions_mortality,
-					),
-					transaction_nonce,
-					header,
-					proof,
-				)
-			})
+			.submit_signed_extrinsic(
+				transactions_author,
+				move |best_block_id, transaction_nonce| {
+					pipeline.make_submit_finality_proof_transaction(
+						relay_substrate_client::TransactionEra::new(
+							best_block_id.0,
+							best_block_id.1,
+							transactions_mortality,
+						),
+						transaction_nonce,
+						header,
+						proof,
+					)
+				},
+			)
 			.await
 			.map(drop)
 	}
diff --git a/bridges/relays/lib-substrate-relay/src/headers_initialize.rs b/bridges/relays/lib-substrate-relay/src/headers_initialize.rs
index 4a3a16bbe17750e4edf2fcb72d4aa8ad1ede7ded..00397434a0d7ea1a3f45cef082927b5847d8136d 100644
--- a/bridges/relays/lib-substrate-relay/src/headers_initialize.rs
+++ b/bridges/relays/lib-substrate-relay/src/headers_initialize.rs
@@ -21,10 +21,10 @@
 //! and authorities set from source to target chain. The headers sync starts
 //! with this header.
 
-use bp_header_chain::InitializationData;
 use bp_header_chain::{
 	find_grandpa_authorities_scheduled_change,
 	justification::{verify_justification, GrandpaJustification},
+	InitializationData,
 };
 use codec::Decode;
 use finality_grandpa::voter_set::VoterSet;
@@ -103,31 +103,30 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	// In ideal world we just need to get best finalized header and then to read GRANDPA authorities
 	// set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header.
 	//
-	// But now there are problems with this approach - `CurrentSetId` may return invalid value. So here
-	// we're waiting for the next justification, read the authorities set and then try to figure out
-	// the set id with bruteforce.
-	let justifications = source_client
-		.subscribe_justifications()
-		.await
-		.map_err(|err| format!("Failed to subscribe to {} justifications: {:?}", SourceChain::NAME, err))?;
+	// But now there are problems with this approach - `CurrentSetId` may return invalid value. So
+	// here we're waiting for the next justification, read the authorities set and then try to
+	// figure out the set id with bruteforce.
+	let justifications = source_client.subscribe_justifications().await.map_err(|err| {
+		format!("Failed to subscribe to {} justifications: {:?}", SourceChain::NAME, err)
+	})?;
 
 	// Read next justification - the header that it finalizes will be used as initial header.
 	let justification = justifications
 		.next()
 		.await
 		.map_err(|err| err.to_string())
-		.and_then(|justification| justification.ok_or_else(|| "stream has ended unexpectedly".into()))
+		.and_then(|justification| {
+			justification.ok_or_else(|| "stream has ended unexpectedly".into())
+		})
 		.map_err(|err| {
-			format!(
-				"Failed to read {} justification from the stream: {}",
-				SourceChain::NAME,
-				err,
-			)
+			format!("Failed to read {} justification from the stream: {}", SourceChain::NAME, err,)
 		})?;
 
 	// Read initial header.
-	let justification: GrandpaJustification<SourceChain::Header> = Decode::decode(&mut &justification.0[..])
-		.map_err(|err| format!("Failed to decode {} justification: {:?}", SourceChain::NAME, err))?;
+	let justification: GrandpaJustification<SourceChain::Header> =
+		Decode::decode(&mut &justification.0[..]).map_err(|err| {
+			format!("Failed to decode {} justification: {:?}", SourceChain::NAME, err)
+		})?;
 
 	let (initial_header_hash, initial_header_number) =
 		(justification.commit.target_hash, justification.commit.target_number);
@@ -140,7 +139,8 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	);
 
 	// Read GRANDPA authorities set at initial header.
-	let initial_authorities_set = source_authorities_set(&source_client, initial_header_hash).await?;
+	let initial_authorities_set =
+		source_authorities_set(&source_client, initial_header_hash).await?;
 	log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}",
 		SourceChain::NAME,
 		initial_authorities_set,
@@ -159,7 +159,8 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	);
 	let schedules_change = scheduled_change.is_some();
 	if schedules_change {
-		authorities_for_verification = source_authorities_set(&source_client, *initial_header.parent_hash()).await?;
+		authorities_for_verification =
+			source_authorities_set(&source_client, *initial_header.parent_hash()).await?;
 		log::trace!(
 			target: "bridge",
 			"Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}",
@@ -171,13 +172,14 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	// Now let's try to guess authorities set id by verifying justification.
 	let mut initial_authorities_set_id = 0;
 	let mut min_possible_block_number = SourceChain::BlockNumber::zero();
-	let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()).ok_or_else(|| {
-		format!(
-			"Read invalid {} authorities set: {:?}",
-			SourceChain::NAME,
-			authorities_for_verification,
-		)
-	})?;
+	let authorities_for_verification = VoterSet::new(authorities_for_verification.clone())
+		.ok_or_else(|| {
+			format!(
+				"Read invalid {} authorities set: {:?}",
+				SourceChain::NAME,
+				authorities_for_verification,
+			)
+		})?;
 	loop {
 		log::trace!(
 			target: "bridge", "Trying {} GRANDPA authorities set id: {}",
@@ -194,21 +196,21 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 		.is_ok();
 
 		if is_valid_set_id {
-			break;
+			break
 		}
 
 		initial_authorities_set_id += 1;
 		min_possible_block_number += One::one();
 		if min_possible_block_number > initial_header_number {
-			// there can't be more authorities set changes than headers => if we have reached `initial_block_number`
-			// and still have not found correct value of `initial_authorities_set_id`, then something
-			// else is broken => fail
+			// there can't be more authorities set changes than headers => if we have reached
+			// `initial_block_number` and still have not found correct value of
+			// `initial_authorities_set_id`, then something else is broken => fail
 			return Err(format!(
 				"Failed to guess initial {} GRANDPA authorities set id: checked all\
 			possible ids in range [0; {}]",
 				SourceChain::NAME,
 				initial_header_number
-			));
+			))
 		}
 	}
 
@@ -244,10 +246,8 @@ async fn source_authorities_set<SourceChain: Chain>(
 	source_client: &Client<SourceChain>,
 	header_hash: SourceChain::Hash,
 ) -> Result<GrandpaAuthoritiesSet, String> {
-	let raw_authorities_set = source_client
-		.grandpa_authorities_set(header_hash)
-		.await
-		.map_err(|err| {
+	let raw_authorities_set =
+		source_client.grandpa_authorities_set(header_hash).await.map_err(|err| {
 			format!(
 				"Failed to retrive {} GRANDPA authorities set at header {}: {:?}",
 				SourceChain::NAME,
diff --git a/bridges/relays/lib-substrate-relay/src/helpers.rs b/bridges/relays/lib-substrate-relay/src/helpers.rs
index 91d551140c2decc210131f746ff16a46331fbd7b..01f881998ad008abe0e9e9f817e59d54f1b6a4d0 100644
--- a/bridges/relays/lib-substrate-relay/src/helpers.rs
+++ b/bridges/relays/lib-substrate-relay/src/helpers.rs
@@ -27,15 +27,9 @@ pub fn token_price_metric(
 	FloatJsonValueMetric::new(
 		registry,
 		prefix,
-		format!(
-			"https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=btc",
-			token_id
-		),
+		format!("https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=btc", token_id),
 		format!("$.{}.btc", token_id),
 		format!("{}_to_base_conversion_rate", token_id.replace("-", "_")),
-		format!(
-			"Rate used to convert from {} to some BASE tokens",
-			token_id.to_uppercase()
-		),
+		format!("Rate used to convert from {} to some BASE tokens", token_id.to_uppercase()),
 	)
 }
diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs
index f1f67d6c6e3058eb8d0dfebf0fbf4a47ba5f21e6..973f52d60a38bb6d064f782ef00b366175a721a1 100644
--- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs
+++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs
@@ -16,9 +16,10 @@
 
 //! Tools for supporting message lanes between two Substrate-based chains.
 
-use crate::messages_source::SubstrateMessagesProof;
-use crate::messages_target::SubstrateMessagesReceivingProof;
-use crate::on_demand_headers::OnDemandHeadersRelay;
+use crate::{
+	messages_source::SubstrateMessagesProof, messages_target::SubstrateMessagesReceivingProof,
+	on_demand_headers::OnDemandHeadersRelay,
+};
 
 use async_trait::async_trait;
 use bp_messages::{LaneId, MessageNonce};
@@ -65,18 +66,22 @@ pub trait SubstrateMessageLane: 'static + Clone + Send + Sync {
 	/// Underlying generic message lane.
 	type MessageLane: MessageLane;
 
-	/// Name of the runtime method that returns dispatch weight of outbound messages at the source chain.
+	/// Name of the runtime method that returns dispatch weight of outbound messages at the source
+	/// chain.
 	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str;
 	/// Name of the runtime method that returns latest generated nonce at the source chain.
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str;
-	/// Name of the runtime method that returns latest received (confirmed) nonce at the the source chain.
+	/// Name of the runtime method that returns latest received (confirmed) nonce at the the source
+	/// chain.
 	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str;
 
 	/// Name of the runtime method that returns latest received nonce at the target chain.
 	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str;
-	/// Name of the runtime method that returns the latest confirmed (reward-paid) nonce at the target chain.
+	/// Name of the runtime method that returns the latest confirmed (reward-paid) nonce at the
+	/// target chain.
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str;
-	/// Number of the runtime method that returns state of "unrewarded relayers" set at the target chain.
+	/// Number of the runtime method that returns state of "unrewarded relayers" set at the target
+	/// chain.
 	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str;
 
 	/// Name of the runtime method that returns id of best finalized source header at target chain.
@@ -101,7 +106,8 @@ pub trait SubstrateMessageLane: 'static + Clone + Send + Sync {
 	/// Target chain.
 	type TargetChain: Chain;
 
-	/// Returns id of account that we're using to sign transactions at target chain (messages proof).
+	/// Returns id of account that we're using to sign transactions at target chain (messages
+	/// proof).
 	fn target_transactions_author(&self) -> AccountIdOf<Self::TargetChain>;
 
 	/// Make messages delivery transaction.
@@ -113,7 +119,8 @@ pub trait SubstrateMessageLane: 'static + Clone + Send + Sync {
 		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes;
 
-	/// Returns id of account that we're using to sign transactions at source chain (delivery proof).
+	/// Returns id of account that we're using to sign transactions at source chain (delivery
+	/// proof).
 	fn source_transactions_author(&self) -> AccountIdOf<Self::SourceChain>;
 
 	/// Make messages receiving proof transaction.
@@ -127,7 +134,12 @@ pub trait SubstrateMessageLane: 'static + Clone + Send + Sync {
 
 /// Substrate-to-Substrate message lane.
 #[derive(Debug)]
-pub struct SubstrateMessageLaneToSubstrate<Source: Chain, SourceSignParams, Target: Chain, TargetSignParams> {
+pub struct SubstrateMessageLaneToSubstrate<
+	Source: Chain,
+	SourceSignParams,
+	Target: Chain,
+	TargetSignParams,
+> {
 	/// Client for the source Substrate chain.
 	pub source_client: Client<Source>,
 	/// Parameters required to sign transactions for source chain.
@@ -194,8 +206,8 @@ pub fn select_delivery_transaction_limits<W: pallet_bridge_messages::WeightInfoE
 	let weight_for_delivery_tx = max_extrinsic_weight / 3;
 	let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx;
 
-	let delivery_tx_base_weight =
-		W::receive_messages_proof_overhead() + W::receive_messages_proof_outbound_lane_state_overhead();
+	let delivery_tx_base_weight = W::receive_messages_proof_overhead() +
+		W::receive_messages_proof_outbound_lane_state_overhead();
 	let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight;
 	let max_number_of_messages = std::cmp::min(
 		delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1),
@@ -221,15 +233,18 @@ pub struct StandaloneMessagesMetrics {
 	pub target_to_base_conversion_rate: Option<F64SharedRef>,
 	/// Shared reference to the actual source -> <base> chain token conversion rate.
 	pub source_to_base_conversion_rate: Option<F64SharedRef>,
-	/// Shared reference to the stored (in the source chain runtime storage) target -> source chain conversion rate.
+	/// Shared reference to the stored (in the source chain runtime storage) target -> source chain
+	/// conversion rate.
 	pub target_to_source_conversion_rate: Option<F64SharedRef>,
 }
 
 impl StandaloneMessagesMetrics {
 	/// Return conversion rate from target to source tokens.
 	pub async fn target_to_source_conversion_rate(&self) -> Option<f64> {
-		let target_to_base_conversion_rate = (*self.target_to_base_conversion_rate.as_ref()?.read().await)?;
-		let source_to_base_conversion_rate = (*self.source_to_base_conversion_rate.as_ref()?.read().await)?;
+		let target_to_base_conversion_rate =
+			(*self.target_to_base_conversion_rate.as_ref()?.read().await)?;
+		let source_to_base_conversion_rate =
+			(*self.source_to_base_conversion_rate.as_ref()?.read().await)?;
 		Some(source_to_base_conversion_rate / target_to_base_conversion_rate)
 	}
 }
@@ -246,8 +261,8 @@ pub fn add_standalone_metrics<P: SubstrateMessageLane>(
 	let mut target_to_source_conversion_rate = None;
 	let mut source_to_base_conversion_rate = None;
 	let mut target_to_base_conversion_rate = None;
-	let mut metrics_params =
-		relay_utils::relay_metrics(metrics_prefix, metrics_params).standalone_metric(|registry, prefix| {
+	let mut metrics_params = relay_utils::relay_metrics(metrics_prefix, metrics_params)
+		.standalone_metric(|registry, prefix| {
 			StorageProofOverheadMetric::new(
 				registry,
 				prefix,
@@ -256,8 +271,10 @@ pub fn add_standalone_metrics<P: SubstrateMessageLane>(
 				format!("{} storage proof overhead", P::SourceChain::NAME),
 			)
 		})?;
-	if let Some((target_to_source_conversion_rate_storage_key, initial_target_to_source_conversion_rate)) =
-		target_to_source_conversion_rate_params
+	if let Some((
+		target_to_source_conversion_rate_storage_key,
+		initial_target_to_source_conversion_rate,
+	)) = target_to_source_conversion_rate_params
 	{
 		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
 			let metric = FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
@@ -285,14 +302,16 @@ pub fn add_standalone_metrics<P: SubstrateMessageLane>(
 	}
 	if let Some(source_chain_token_id) = source_chain_token_id {
 		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
-			let metric = crate::helpers::token_price_metric(registry, prefix, source_chain_token_id)?;
+			let metric =
+				crate::helpers::token_price_metric(registry, prefix, source_chain_token_id)?;
 			source_to_base_conversion_rate = Some(metric.shared_value_ref());
 			Ok(metric)
 		})?;
 	}
 	if let Some(target_chain_token_id) = target_chain_token_id {
 		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
-			let metric = crate::helpers::token_price_metric(registry, prefix, target_chain_token_id)?;
+			let metric =
+				crate::helpers::token_price_metric(registry, prefix, target_chain_token_id)?;
 			target_to_base_conversion_rate = Some(metric.shared_value_ref());
 			Ok(metric)
 		})?;
@@ -312,19 +331,21 @@ mod tests {
 	use super::*;
 	use async_std::sync::{Arc, RwLock};
 
-	type RialtoToMillauMessagesWeights = pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>;
+	type RialtoToMillauMessagesWeights =
+		pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>;
 
 	#[test]
 	fn select_delivery_transaction_limits_works() {
-		let (max_count, max_weight) = select_delivery_transaction_limits::<RialtoToMillauMessagesWeights>(
-			bp_millau::max_extrinsic_weight(),
-			bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-		);
+		let (max_count, max_weight) =
+			select_delivery_transaction_limits::<RialtoToMillauMessagesWeights>(
+				bp_millau::max_extrinsic_weight(),
+				bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+			);
 		assert_eq!(
 			(max_count, max_weight),
 			// We don't actually care about these values, so feel free to update them whenever test
-			// fails. The only thing to do before that is to ensure that new values looks sane: i.e. weight
-			// reserved for messages dispatch allows dispatch of non-trivial messages.
+			// fails. The only thing to do before that is to ensure that new values looks sane:
+			// i.e. weight reserved for messages dispatch allows dispatch of non-trivial messages.
 			//
 			// Any significant change in this values should attract additional attention.
 			(782, 216_583_333_334),
diff --git a/bridges/relays/lib-substrate-relay/src/messages_source.rs b/bridges/relays/lib-substrate-relay/src/messages_source.rs
index 4d3332d3271c9a17a8f7c8d385f66256337b9031..c450144a9f6e996198f9997093189b97520dfc52 100644
--- a/bridges/relays/lib-substrate-relay/src/messages_source.rs
+++ b/bridges/relays/lib-substrate-relay/src/messages_source.rs
@@ -18,9 +18,10 @@
 //! runtime that implements `<BridgedChainName>HeaderApi` to allow bridging with
 //! <BridgedName> chain.
 
-use crate::messages_lane::SubstrateMessageLane;
-use crate::messages_target::SubstrateMessagesReceivingProof;
-use crate::on_demand_headers::OnDemandHeadersRelay;
+use crate::{
+	messages_lane::SubstrateMessageLane, messages_target::SubstrateMessagesReceivingProof,
+	on_demand_headers::OnDemandHeadersRelay,
+};
 
 use async_trait::async_trait;
 use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
@@ -29,16 +30,17 @@ use bridge_runtime_common::messages::{
 };
 use codec::{Decode, Encode};
 use frame_support::weights::Weight;
-use messages_relay::message_lane::MessageLane;
 use messages_relay::{
-	message_lane::{SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
 	message_lane_loop::{
-		ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient, SourceClientState,
+		ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient,
+		SourceClientState,
 	},
 };
 use num_traits::{Bounded, Zero};
 use relay_substrate_client::{
-	BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf, HeaderOf, IndexOf,
+	BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf, HeaderOf,
+	IndexOf,
 };
 use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId};
 use sp_core::Bytes;
@@ -69,12 +71,7 @@ impl<P: SubstrateMessageLane> SubstrateMessagesSource<P> {
 		lane_id: LaneId,
 		target_to_source_headers_relay: Option<OnDemandHeadersRelay<P::TargetChain>>,
 	) -> Self {
-		SubstrateMessagesSource {
-			client,
-			lane,
-			lane_id,
-			target_to_source_headers_relay,
-		}
+		SubstrateMessagesSource { client, lane, lane_id, target_to_source_headers_relay }
 	}
 }
 
@@ -150,8 +147,8 @@ where
 				Some(id.1),
 			)
 			.await?;
-		let latest_generated_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+		let latest_generated_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, latest_generated_nonce))
 	}
 
@@ -167,8 +164,8 @@ where
 				Some(id.1),
 			)
 			.await?;
-		let latest_received_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+		let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, latest_received_nonce))
 	}
 
@@ -176,7 +173,10 @@ where
 		&self,
 		id: SourceHeaderIdOf<P::MessageLane>,
 		nonces: RangeInclusive<MessageNonce>,
-	) -> Result<MessageDetailsMap<<P::MessageLane as MessageLane>::SourceChainBalance>, SubstrateError> {
+	) -> Result<
+		MessageDetailsMap<<P::MessageLane as MessageLane>::SourceChainBalance>,
+		SubstrateError,
+	> {
 		let encoded_response = self
 			.client
 			.state_call(
@@ -187,7 +187,8 @@ where
 			.await?;
 
 		make_message_details_map::<P::SourceChain>(
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?,
+			Decode::decode(&mut &encoded_response.0[..])
+				.map_err(SubstrateError::ResponseParseFailed)?,
 			nonces,
 		)
 	}
@@ -205,7 +206,8 @@ where
 		),
 		SubstrateError,
 	> {
-		let mut storage_keys = Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1);
+		let mut storage_keys =
+			Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1);
 		let mut message_nonce = *nonces.start();
 		while message_nonce <= *nonces.end() {
 			let message_key = pallet_bridge_messages::storage_keys::message_key(
@@ -223,12 +225,7 @@ where
 			));
 		}
 
-		let proof = self
-			.client
-			.prove_storage(storage_keys, id.1)
-			.await?
-			.iter_nodes()
-			.collect();
+		let proof = self.client.prove_storage(storage_keys, id.1).await?.iter_nodes().collect();
 		let proof = FromBridgedChainMessagesProof {
 			bridged_header_hash: id.1,
 			storage_proof: proof,
@@ -246,9 +243,16 @@ where
 	) -> Result<(), SubstrateError> {
 		let lane = self.lane.clone();
 		self.client
-			.submit_signed_extrinsic(self.lane.source_transactions_author(), move |_, transaction_nonce| {
-				lane.make_messages_receiving_proof_transaction(transaction_nonce, generated_at_block, proof)
-			})
+			.submit_signed_extrinsic(
+				self.lane.source_transactions_author(),
+				move |_, transaction_nonce| {
+					lane.make_messages_receiving_proof_transaction(
+						transaction_nonce,
+						generated_at_block,
+						proof,
+					)
+				},
+			)
 			.await?;
 		Ok(())
 	}
@@ -259,7 +263,9 @@ where
 		}
 	}
 
-	async fn estimate_confirmation_transaction(&self) -> <P::MessageLane as MessageLane>::SourceChainBalance {
+	async fn estimate_confirmation_transaction(
+		&self,
+	) -> <P::MessageLane as MessageLane>::SourceChainBalance {
 		self.client
 			.estimate_extrinsic_fee(self.lane.make_messages_receiving_proof_transaction(
 				Zero::zero(),
@@ -276,10 +282,14 @@ where
 ///
 /// We don't care about proof actually being the valid proof, because its validity doesn't
 /// affect the call weight - we only care about its size.
-fn prepare_dummy_messages_delivery_proof<SC: Chain, TC: Chain>() -> SubstrateMessagesReceivingProof<TC> {
-	let single_message_confirmation_size =
-		bp_messages::InboundLaneData::<()>::encoded_size_hint(SC::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, 1, 1)
-			.unwrap_or(u32::MAX);
+fn prepare_dummy_messages_delivery_proof<SC: Chain, TC: Chain>(
+) -> SubstrateMessagesReceivingProof<TC> {
+	let single_message_confirmation_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
+		SC::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
+		1,
+		1,
+	)
+	.unwrap_or(u32::MAX);
 	let proof_size = TC::STORAGE_PROOF_OVERHEAD.saturating_add(single_message_confirmation_size);
 	(
 		UnrewardedRelayersState {
@@ -303,7 +313,10 @@ fn prepare_dummy_messages_delivery_proof<SC: Chain, TC: Chain>() -> SubstrateMes
 pub async fn read_client_state<SelfChain, BridgedHeaderHash, BridgedHeaderNumber>(
 	self_client: &Client<SelfChain>,
 	best_finalized_header_id_method_name: &str,
-) -> Result<ClientState<HeaderIdOf<SelfChain>, HeaderId<BridgedHeaderHash, BridgedHeaderNumber>>, SubstrateError>
+) -> Result<
+	ClientState<HeaderIdOf<SelfChain>, HeaderId<BridgedHeaderHash, BridgedHeaderNumber>>,
+	SubstrateError,
+>
 where
 	SelfChain: Chain,
 	SelfChain::Header: DeserializeOwned,
@@ -313,8 +326,10 @@ where
 {
 	// let's read our state first: we need best finalized header hash on **this** chain
 	let self_best_finalized_header_hash = self_client.best_finalized_header_hash().await?;
-	let self_best_finalized_header = self_client.header_by_hash(self_best_finalized_header_hash).await?;
-	let self_best_finalized_id = HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash);
+	let self_best_finalized_header =
+		self_client.header_by_hash(self_best_finalized_header_hash).await?;
+	let self_best_finalized_id =
+		HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash);
 
 	// now let's read our best header on **this** chain
 	let self_best_header = self_client.best_header().await?;
@@ -330,11 +345,10 @@ where
 		)
 		.await?;
 	let decoded_best_finalized_peer_on_self: (BridgedHeaderNumber, BridgedHeaderHash) =
-		Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
-	let peer_on_self_best_finalized_id = HeaderId(
-		decoded_best_finalized_peer_on_self.0,
-		decoded_best_finalized_peer_on_self.1,
-	);
+		Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
+	let peer_on_self_best_finalized_id =
+		HeaderId(decoded_best_finalized_peer_on_self.0, decoded_best_finalized_peer_on_self.1);
 
 	Ok(ClientState {
 		best_self: self_best_id,
@@ -358,16 +372,14 @@ fn make_message_details_map<C: Chain>(
 
 	// this is actually prevented by external logic
 	if nonces.is_empty() {
-		return Ok(weights_map);
+		return Ok(weights_map)
 	}
 
 	// check if last nonce is missing - loop below is not checking this
-	let last_nonce_is_missing = weights
-		.last()
-		.map(|details| details.nonce != *nonces.end())
-		.unwrap_or(true);
+	let last_nonce_is_missing =
+		weights.last().map(|details| details.nonce != *nonces.end()).unwrap_or(true);
 	if last_nonce_is_missing {
-		return make_missing_nonce_error(*nonces.end());
+		return make_missing_nonce_error(*nonces.end())
 	}
 
 	let mut expected_nonce = *nonces.start();
@@ -379,20 +391,21 @@ fn make_message_details_map<C: Chain>(
 			(false, true) => {
 				// this may happen if some messages were already pruned from the source node
 				//
-				// this is not critical error and will be auto-resolved by messages lane (and target node)
+				// this is not critical error and will be auto-resolved by messages lane (and target
+				// node)
 				log::info!(
 					target: "bridge",
 					"Some messages are missing from the {} node: {:?}. Target node may be out of sync?",
 					C::NAME,
 					expected_nonce..details.nonce,
 				);
-			}
+			},
 			(false, false) => {
 				// some nonces are missing from the middle/tail of the range
 				//
 				// this is critical error, because we can't miss any nonces
-				return make_missing_nonce_error(expected_nonce);
-			}
+				return make_missing_nonce_error(expected_nonce)
+			},
 		}
 
 		weights_map.insert(
@@ -528,7 +541,8 @@ mod tests {
 
 	#[test]
 	fn prepare_dummy_messages_delivery_proof_works() {
-		let expected_minimal_size = Wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE + Rococo::STORAGE_PROOF_OVERHEAD;
+		let expected_minimal_size =
+			Wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE + Rococo::STORAGE_PROOF_OVERHEAD;
 		let dummy_proof = prepare_dummy_messages_delivery_proof::<Wococo, Rococo>();
 		assert!(
 			dummy_proof.1.encode().len() as u32 > expected_minimal_size,
diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs
index 8ad41281bfdcc25bd60b4cf49b6c606d2196d9f4..f7b911f2c023a228269393f33611bf5cb736bd6b 100644
--- a/bridges/relays/lib-substrate-relay/src/messages_target.rs
+++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs
@@ -18,9 +18,11 @@
 //! runtime that implements `<BridgedChainName>HeaderApi` to allow bridging with
 //! <BridgedName> chain.
 
-use crate::messages_lane::{StandaloneMessagesMetrics, SubstrateMessageLane};
-use crate::messages_source::{read_client_state, SubstrateMessagesProof};
-use crate::on_demand_headers::OnDemandHeadersRelay;
+use crate::{
+	messages_lane::{StandaloneMessagesMetrics, SubstrateMessageLane},
+	messages_source::{read_client_state, SubstrateMessagesProof},
+	on_demand_headers::OnDemandHeadersRelay,
+};
 
 use async_trait::async_trait;
 use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
@@ -30,14 +32,14 @@ use bridge_runtime_common::messages::{
 };
 use codec::{Decode, Encode};
 use frame_support::weights::{Weight, WeightToFeePolynomial};
-use messages_relay::message_lane::MessageLane;
 use messages_relay::{
-	message_lane::{SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
 	message_lane_loop::{TargetClient, TargetClientState},
 };
 use num_traits::{Bounded, Zero};
 use relay_substrate_client::{
-	BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderOf, IndexOf, WeightToFeeOf,
+	BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderOf, IndexOf,
+	WeightToFeeOf,
 };
 use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId};
 use sp_core::Bytes;
@@ -45,10 +47,8 @@ use sp_runtime::{traits::Saturating, DeserializeOwned, FixedPointNumber, FixedU1
 use std::{convert::TryFrom, ops::RangeInclusive};
 
 /// Message receiving proof returned by the target Substrate node.
-pub type SubstrateMessagesReceivingProof<C> = (
-	UnrewardedRelayersState,
-	FromBridgedChainMessagesDeliveryProof<HashOf<C>>,
-);
+pub type SubstrateMessagesReceivingProof<C> =
+	(UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof<HashOf<C>>);
 
 /// Substrate client as Substrate messages target.
 pub struct SubstrateMessagesTarget<P: SubstrateMessageLane> {
@@ -150,8 +150,8 @@ where
 				Some(id.1),
 			)
 			.await?;
-		let latest_received_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+		let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, latest_received_nonce))
 	}
 
@@ -167,8 +167,8 @@ where
 				Some(id.1),
 			)
 			.await?;
-		let latest_received_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+		let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, latest_received_nonce))
 	}
 
@@ -185,7 +185,8 @@ where
 			)
 			.await?;
 		let unrewarded_relayers_state: UnrewardedRelayersState =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+			Decode::decode(&mut &encoded_response.0[..])
+				.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, unrewarded_relayers_state))
 	}
 
@@ -193,10 +194,7 @@ where
 		&self,
 		id: TargetHeaderIdOf<P::MessageLane>,
 	) -> Result<
-		(
-			TargetHeaderIdOf<P::MessageLane>,
-			<P::MessageLane as MessageLane>::MessagesReceivingProof,
-		),
+		(TargetHeaderIdOf<P::MessageLane>, <P::MessageLane as MessageLane>::MessagesReceivingProof),
 		SubstrateError,
 	> {
 		let (id, relayers_state) = self.unrewarded_relayers_state(id).await?;
@@ -227,9 +225,17 @@ where
 		let lane = self.lane.clone();
 		let nonces_clone = nonces.clone();
 		self.client
-			.submit_signed_extrinsic(self.lane.target_transactions_author(), move |_, transaction_nonce| {
-				lane.make_messages_delivery_transaction(transaction_nonce, generated_at_header, nonces_clone, proof)
-			})
+			.submit_signed_extrinsic(
+				self.lane.target_transactions_author(),
+				move |_, transaction_nonce| {
+					lane.make_messages_delivery_transaction(
+						transaction_nonce,
+						generated_at_header,
+						nonces_clone,
+						proof,
+					)
+				},
+			)
 			.await?;
 		Ok(nonces)
 	}
@@ -247,11 +253,8 @@ where
 		total_dispatch_weight: Weight,
 		total_size: u32,
 	) -> Result<<P::MessageLane as MessageLane>::SourceChainBalance, SubstrateError> {
-		let conversion_rate = self
-			.metric_values
-			.target_to_source_conversion_rate()
-			.await
-			.ok_or_else(|| {
+		let conversion_rate =
+			self.metric_values.target_to_source_conversion_rate().await.ok_or_else(|| {
 				SubstrateError::Custom(format!(
 					"Failed to compute conversion rate from {} to {}",
 					P::TargetChain::NAME,
@@ -264,26 +267,31 @@ where
 			Zero::zero(),
 			HeaderId(Default::default(), Default::default()),
 			nonces.clone(),
-			prepare_dummy_messages_proof::<P::SourceChain>(nonces.clone(), total_dispatch_weight, total_size),
+			prepare_dummy_messages_proof::<P::SourceChain>(
+				nonces.clone(),
+				total_dispatch_weight,
+				total_size,
+			),
 		);
 		let delivery_tx_fee = self.client.estimate_extrinsic_fee(delivery_tx).await?;
 		let inclusion_fee_in_target_tokens = delivery_tx_fee.inclusion_fee();
 
-		// The pre-dispatch cost of delivery transaction includes additional fee to cover dispatch fee payment
-		// (Currency::transfer in regular deployment). But if message dispatch has already been paid
-		// at the Source chain, the delivery transaction will refund relayer with this additional cost.
-		// But `estimate_extrinsic_fee` obviously just returns pre-dispatch cost of the transaction. So
-		// if transaction delivers prepaid message, then it may happen that pre-dispatch cost is larger
-		// than reward and `Rational` relayer will refuse to deliver this message.
+		// The pre-dispatch cost of delivery transaction includes additional fee to cover dispatch
+		// fee payment (Currency::transfer in regular deployment). But if message dispatch has
+		// already been paid at the Source chain, the delivery transaction will refund relayer with
+		// this additional cost. But `estimate_extrinsic_fee` obviously just returns pre-dispatch
+		// cost of the transaction. So if transaction delivers prepaid message, then it may happen
+		// that pre-dispatch cost is larger than reward and `Rational` relayer will refuse to
+		// deliver this message.
 		//
-		// The most obvious solution would be to deduct total weight of dispatch fee payments from the
-		// `total_dispatch_weight` and use regular `estimate_extrinsic_fee` call. But what if
-		// `total_dispatch_weight` is less than total dispatch fee payments weight? Weight is strictly
-		// positive, so we can't use this option.
+		// The most obvious solution would be to deduct total weight of dispatch fee payments from
+		// the `total_dispatch_weight` and use regular `estimate_extrinsic_fee` call. But what if
+		// `total_dispatch_weight` is less than total dispatch fee payments weight? Weight is
+		// strictly positive, so we can't use this option.
 		//
-		// Instead we'll be directly using `WeightToFee` and `NextFeeMultiplier` of the Target chain.
-		// This requires more knowledge of the Target chain, but seems there's no better way to solve
-		// this now.
+		// Instead we'll be directly using `WeightToFee` and `NextFeeMultiplier` of the Target
+		// chain. This requires more knowledge of the Target chain, but seems there's no better way
+		// to solve this now.
 		let expected_refund_in_target_tokens = if total_prepaid_nonces != 0 {
 			const WEIGHT_DIFFERENCE: Weight = 100;
 
@@ -294,7 +302,11 @@ where
 					Zero::zero(),
 					HeaderId(Default::default(), Default::default()),
 					nonces.clone(),
-					prepare_dummy_messages_proof::<P::SourceChain>(nonces.clone(), larger_dispatch_weight, total_size),
+					prepare_dummy_messages_proof::<P::SourceChain>(
+						nonces.clone(),
+						larger_dispatch_weight,
+						total_size,
+					),
 				))
 				.await?;
 
@@ -311,10 +323,11 @@ where
 			Zero::zero()
 		};
 
-		let delivery_fee_in_source_tokens = convert_target_tokens_to_source_tokens::<P::SourceChain, P::TargetChain>(
-			FixedU128::from_float(conversion_rate),
-			inclusion_fee_in_target_tokens.saturating_sub(expected_refund_in_target_tokens),
-		);
+		let delivery_fee_in_source_tokens =
+			convert_target_tokens_to_source_tokens::<P::SourceChain, P::TargetChain>(
+				FixedU128::from_float(conversion_rate),
+				inclusion_fee_in_target_tokens.saturating_sub(expected_refund_in_target_tokens),
+			);
 
 		log::trace!(
 			target: "bridge",
@@ -356,7 +369,10 @@ fn prepare_dummy_messages_proof<SC: Chain>(
 		total_dispatch_weight,
 		FromBridgedChainMessagesProof {
 			bridged_header_hash: Default::default(),
-			storage_proof: vec![vec![0; SC::STORAGE_PROOF_OVERHEAD.saturating_add(total_size) as usize]],
+			storage_proof: vec![vec![
+				0;
+				SC::STORAGE_PROOF_OVERHEAD.saturating_add(total_size) as usize
+			]],
 			lane: Default::default(),
 			nonces_start: *nonces.start(),
 			nonces_end: *nonces.end(),
@@ -373,8 +389,10 @@ fn convert_target_tokens_to_source_tokens<SC: Chain, TC: Chain>(
 where
 	SC::Balance: TryFrom<TC::Balance>,
 {
-	SC::Balance::try_from(target_to_source_conversion_rate.saturating_mul_int(target_transaction_fee))
-		.unwrap_or_else(|_| SC::Balance::max_value())
+	SC::Balance::try_from(
+		target_to_source_conversion_rate.saturating_mul_int(target_transaction_fee),
+	)
+	.unwrap_or_else(|_| SC::Balance::max_value())
 }
 
 /// Compute fee multiplier that is used by the chain, given a couple of fees for transactions
@@ -392,7 +410,8 @@ fn compute_fee_multiplier<C: Chain>(
 	larger_adjusted_weight_fee: BalanceOf<C>,
 	larger_tx_weight: Weight,
 ) -> FixedU128 {
-	let adjusted_weight_fee_difference = larger_adjusted_weight_fee.saturating_sub(smaller_adjusted_weight_fee);
+	let adjusted_weight_fee_difference =
+		larger_adjusted_weight_fee.saturating_sub(smaller_adjusted_weight_fee);
 	let smaller_tx_unadjusted_weight_fee = WeightToFeeOf::<C>::calc(&smaller_tx_weight);
 	let larger_tx_unadjusted_weight_fee = WeightToFeeOf::<C>::calc(&larger_tx_weight);
 	FixedU128::saturating_from_rational(
@@ -511,10 +530,12 @@ mod tests {
 		let multiplier = FixedU128::saturating_from_rational(1, 1000);
 
 		let smaller_weight = 1_000_000;
-		let smaller_adjusted_weight_fee = multiplier.saturating_mul_int(WeightToFeeOf::<Rococo>::calc(&smaller_weight));
+		let smaller_adjusted_weight_fee =
+			multiplier.saturating_mul_int(WeightToFeeOf::<Rococo>::calc(&smaller_weight));
 
 		let larger_weight = smaller_weight + 200_000;
-		let larger_adjusted_weight_fee = multiplier.saturating_mul_int(WeightToFeeOf::<Rococo>::calc(&larger_weight));
+		let larger_adjusted_weight_fee =
+			multiplier.saturating_mul_int(WeightToFeeOf::<Rococo>::calc(&larger_weight));
 
 		assert_eq!(
 			compute_fee_multiplier::<Rococo>(
@@ -533,7 +554,8 @@ mod tests {
 			compute_prepaid_messages_refund::<TestSubstrateMessageLane>(
 				10,
 				FixedU128::saturating_from_rational(110, 100),
-			) > (10 * TestSubstrateMessageLane::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN).into()
+			) > (10 * TestSubstrateMessageLane::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN)
+				.into()
 		);
 	}
 }
diff --git a/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs
index 885b3a788d9f40a3604612b66366e3c87c2f6a87..4dad20579a90d2279c683aa3dff6a0d922b1d151 100644
--- a/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs
+++ b/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs
@@ -31,18 +31,23 @@ use relay_substrate_client::{
 	Chain, Client, HeaderIdOf, SyncHeader,
 };
 use relay_utils::{
-	metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, MaybeConnectionError,
+	metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient,
+	MaybeConnectionError,
 };
 
-use crate::finality_pipeline::{
-	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT, STALL_TIMEOUT,
+use crate::{
+	finality_pipeline::{
+		SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT,
+		STALL_TIMEOUT,
+	},
+	finality_target::SubstrateFinalityTarget,
 };
-use crate::finality_target::SubstrateFinalityTarget;
 
 /// On-demand Substrate <-> Substrate headers relay.
 ///
-/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages relay) needs
-/// it to continue its regular work. When enough headers are relayed, on-demand stops syncing headers.
+/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages
+/// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops
+/// syncing headers.
 #[derive(Clone)]
 pub struct OnDemandHeadersRelay<SourceChain: Chain> {
 	/// Relay task name.
@@ -68,7 +73,11 @@ impl<SourceChain: Chain> OnDemandHeadersRelay<SourceChain> {
 		TargetChain::BlockNumber: BlockNumberBase,
 		TargetSign: Clone + Send + Sync + 'static,
 		P: SubstrateFinalitySyncPipeline<
-			FinalitySyncPipeline = SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>,
+			FinalitySyncPipeline = SubstrateFinalityToSubstrate<
+				SourceChain,
+				TargetChain,
+				TargetSign,
+			>,
 			TargetChain = TargetChain,
 		>,
 	{
@@ -135,8 +144,11 @@ async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 		_,
 		SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>,
 	>::new(source_client.clone(), Some(required_header_number.clone()));
-	let mut finality_target =
-		SubstrateFinalityTarget::new(target_client.clone(), pipeline.clone(), target_transactions_mortality);
+	let mut finality_target = SubstrateFinalityTarget::new(
+		target_client.clone(),
+		pipeline.clone(),
+		target_transactions_mortality,
+	);
 	let mut latest_non_mandatory_at_source = Zero::zero();
 
 	let mut restart_relay = true;
@@ -163,12 +175,16 @@ async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 				&mut finality_target,
 			)
 			.await;
-			continue;
+			continue
 		}
 
 		// read best finalized source header number from target
-		let best_finalized_source_header_at_target =
-			best_finalized_source_header_at_target::<SourceChain, _, _>(&finality_target, &relay_task_name).await;
+		let best_finalized_source_header_at_target = best_finalized_source_header_at_target::<
+			SourceChain,
+			_,
+			_,
+		>(&finality_target, &relay_task_name)
+		.await;
 		if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) {
 			relay_utils::relay_loop::reconnect_failed_client(
 				FailedClient::Target,
@@ -177,11 +193,12 @@ async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 				&mut finality_target,
 			)
 			.await;
-			continue;
+			continue
 		}
 
 		// submit mandatory header if some headers are missing
-		let best_finalized_source_header_at_target_fmt = format!("{:?}", best_finalized_source_header_at_target);
+		let best_finalized_source_header_at_target_fmt =
+			format!("{:?}", best_finalized_source_header_at_target);
 		let mandatory_scan_range = mandatory_headers_scan_range::<SourceChain>(
 			best_finalized_source_header_at_source.ok(),
 			best_finalized_source_header_at_target.ok(),
@@ -207,8 +224,8 @@ async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 					// there are no (or we don't need to relay them) mandatory headers in the range
 					// => to avoid scanning the same headers over and over again, remember that
 					latest_non_mandatory_at_source = mandatory_scan_range.1;
-				}
-				Err(e) => {
+				},
+				Err(e) =>
 					if e.is_connection_error() {
 						relay_utils::relay_loop::reconnect_failed_client(
 							FailedClient::Source,
@@ -217,9 +234,8 @@ async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 							&mut finality_target,
 						)
 						.await;
-						continue;
-					}
-				}
+						continue
+					},
 			}
 		}
 
@@ -230,7 +246,10 @@ async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 					finality_source.clone(),
 					finality_target.clone(),
 					FinalitySyncParams {
-						tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL),
+						tick: std::cmp::max(
+							SourceChain::AVERAGE_BLOCK_INTERVAL,
+							TargetChain::AVERAGE_BLOCK_INTERVAL,
+						),
 						recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT,
 						stall_timeout: STALL_TIMEOUT,
 						only_mandatory_headers,
@@ -279,12 +298,12 @@ async fn mandatory_headers_scan_range<C: Chain>(
 		.checked_sub(&best_finalized_source_header_at_target)
 		.unwrap_or_else(Zero::zero);
 	if current_headers_difference <= maximal_headers_difference {
-		return None;
+		return None
 	}
 
 	// if relay is already asked to sync headers, don't do anything yet
 	if required_header_number > best_finalized_source_header_at_target {
-		return None;
+		return None
 	}
 
 	Some((
@@ -293,7 +312,8 @@ async fn mandatory_headers_scan_range<C: Chain>(
 	))
 }
 
-/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay it.
+/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay
+/// it.
 ///
 /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise.
 async fn relay_mandatory_header_from_range<SourceChain: Chain, P>(
@@ -308,7 +328,8 @@ where
 	P: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
 {
 	// search for mandatory header first
-	let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?;
+	let mandatory_source_header_number =
+		find_mandatory_header_in_range(finality_source, range).await?;
 
 	// if there are no mandatory headers - we have nothing to do
 	let mandatory_source_header_number = match mandatory_source_header_number {
@@ -320,7 +341,7 @@ where
 	// less than our `mandatory_source_header_number` before logging anything
 	let mut required_header_number = required_header_number.lock().await;
 	if *required_header_number >= mandatory_source_header_number {
-		return Ok(false);
+		return Ok(false)
 	}
 
 	log::trace!(
@@ -348,19 +369,16 @@ where
 	SubstrateFinalitySource<SourceChain, P>: FinalitySourceClient<P>,
 	P: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
 {
-	finality_source
-		.on_chain_best_finalized_block_number()
-		.await
-		.map_err(|error| {
-			log::error!(
-				target: "bridge",
-				"Failed to read best finalized source header from source in {} relay: {:?}",
-				relay_task_name,
-				error,
-			);
+	finality_source.on_chain_best_finalized_block_number().await.map_err(|error| {
+		log::error!(
+			target: "bridge",
+			"Failed to read best finalized source header from source in {} relay: {:?}",
+			relay_task_name,
+			error,
+		);
 
-			error
-		})
+		error
+	})
 }
 
 /// Read best finalized source block number from target client.
@@ -375,19 +393,16 @@ where
 	P: SubstrateFinalitySyncPipeline,
 	P::FinalitySyncPipeline: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
 {
-	finality_target
-		.best_finalized_source_block_number()
-		.await
-		.map_err(|error| {
-			log::error!(
-				target: "bridge",
-				"Failed to read best finalized source header from target in {} relay: {:?}",
-				relay_task_name,
-				error,
-			);
+	finality_target.best_finalized_source_block_number().await.map_err(|error| {
+		log::error!(
+			target: "bridge",
+			"Failed to read best finalized source header from target in {} relay: {:?}",
+			relay_task_name,
+			error,
+		);
 
-			error
-		})
+		error
+	})
 }
 
 /// Read first mandatory header in given inclusive range.
@@ -403,9 +418,10 @@ where
 {
 	let mut current = range.0;
 	while current <= range.1 {
-		let header: SyncHeader<SourceChain::Header> = finality_source.client().header_by_number(current).await?.into();
+		let header: SyncHeader<SourceChain::Header> =
+			finality_source.client().header_by_number(current).await?.into();
 		if header.is_mandatory() {
-			return Ok(Some(current));
+			return Ok(Some(current))
 		}
 
 		current += One::one();
@@ -431,7 +447,13 @@ mod tests {
 	#[async_std::test]
 	async fn mandatory_headers_scan_range_selects_range_if_too_many_headers_are_missing() {
 		assert_eq!(
-			mandatory_headers_scan_range::<TestChain>(AT_SOURCE, AT_TARGET, 5, &Arc::new(Mutex::new(0))).await,
+			mandatory_headers_scan_range::<TestChain>(
+				AT_SOURCE,
+				AT_TARGET,
+				5,
+				&Arc::new(Mutex::new(0))
+			)
+			.await,
 			Some((AT_TARGET.unwrap() + 1, AT_SOURCE.unwrap())),
 		);
 	}
@@ -439,7 +461,13 @@ mod tests {
 	#[async_std::test]
 	async fn mandatory_headers_scan_range_selects_nothing_if_enough_headers_are_relayed() {
 		assert_eq!(
-			mandatory_headers_scan_range::<TestChain>(AT_SOURCE, AT_TARGET, 10, &Arc::new(Mutex::new(0))).await,
+			mandatory_headers_scan_range::<TestChain>(
+				AT_SOURCE,
+				AT_TARGET,
+				10,
+				&Arc::new(Mutex::new(0))
+			)
+			.await,
 			None,
 		);
 	}
diff --git a/bridges/relays/messages/src/message_lane.rs b/bridges/relays/messages/src/message_lane.rs
index d03d40759731f326935ab3da45829fad60fe2e48..2b2d8029fc74b8237efebead2782315af43921e8 100644
--- a/bridges/relays/messages/src/message_lane.rs
+++ b/bridges/relays/messages/src/message_lane.rs
@@ -61,7 +61,9 @@ pub trait MessageLane: 'static + Clone + Send + Sync {
 }
 
 /// Source header id within given one-way message lane.
-pub type SourceHeaderIdOf<P> = HeaderId<<P as MessageLane>::SourceHeaderHash, <P as MessageLane>::SourceHeaderNumber>;
+pub type SourceHeaderIdOf<P> =
+	HeaderId<<P as MessageLane>::SourceHeaderHash, <P as MessageLane>::SourceHeaderNumber>;
 
 /// Target header id within given one-way message lane.
-pub type TargetHeaderIdOf<P> = HeaderId<<P as MessageLane>::TargetHeaderHash, <P as MessageLane>::TargetHeaderNumber>;
+pub type TargetHeaderIdOf<P> =
+	HeaderId<<P as MessageLane>::TargetHeaderHash, <P as MessageLane>::TargetHeaderNumber>;
diff --git a/bridges/relays/messages/src/message_lane_loop.rs b/bridges/relays/messages/src/message_lane_loop.rs
index 72c984d2990cc0b39e0ecb4d09363c626e5180ed..595d241bf3014cc5cacac24e4fc97912502ba23f 100644
--- a/bridges/relays/messages/src/message_lane_loop.rs
+++ b/bridges/relays/messages/src/message_lane_loop.rs
@@ -24,10 +24,12 @@
 //! finalized header. I.e. when talking about headers in lane context, we
 //! only care about finalized headers.
 
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use crate::message_race_delivery::run as run_message_delivery_race;
-use crate::message_race_receiving::run as run_message_receiving_race;
-use crate::metrics::MessageLaneLoopMetrics;
+use crate::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_race_delivery::run as run_message_delivery_race,
+	message_race_receiving::run as run_message_receiving_race,
+	metrics::MessageLaneLoopMetrics,
+};
 
 use async_trait::async_trait;
 use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
@@ -64,20 +66,22 @@ pub struct Params {
 pub enum RelayerMode {
 	/// The relayer doesn't care about rewards.
 	Altruistic,
-	/// The relayer will deliver all messages and confirmations as long as he's not losing any funds.
+	/// The relayer will deliver all messages and confirmations as long as he's not losing any
+	/// funds.
 	Rational,
 }
 
 /// Message delivery race parameters.
 #[derive(Debug, Clone)]
 pub struct MessageDeliveryParams {
-	/// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number of entries
-	/// in the `InboundLaneData::relayers` set, all new messages will be rejected until reward payment will
-	/// be proved (by including outbound lane state to the message delivery transaction).
+	/// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number
+	/// of entries in the `InboundLaneData::relayers` set, all new messages will be rejected until
+	/// reward payment will be proved (by including outbound lane state to the message delivery
+	/// transaction).
 	pub max_unrewarded_relayer_entries_at_target: MessageNonce,
-	/// Message delivery race will stop delivering messages if there are `max_unconfirmed_nonces_at_target`
-	/// unconfirmed nonces on the target node. The race would continue once they're confirmed by the
-	/// receiving race.
+	/// Message delivery race will stop delivering messages if there are
+	/// `max_unconfirmed_nonces_at_target` unconfirmed nonces on the target node. The race would
+	/// continue once they're confirmed by the receiving race.
 	pub max_unconfirmed_nonces_at_target: MessageNonce,
 	/// Maximal number of relayed messages in single delivery transaction.
 	pub max_messages_in_single_batch: MessageNonce,
@@ -103,7 +107,8 @@ pub struct MessageDetails<SourceChainBalance> {
 }
 
 /// Messages details map.
-pub type MessageDetailsMap<SourceChainBalance> = BTreeMap<MessageNonce, MessageDetails<SourceChainBalance>>;
+pub type MessageDetailsMap<SourceChainBalance> =
+	BTreeMap<MessageNonce, MessageDetails<SourceChainBalance>>;
 
 /// Message delivery race proof parameters.
 #[derive(Debug, PartialEq)]
@@ -225,7 +230,8 @@ pub struct ClientState<SelfHeaderId, PeerHeaderId> {
 	pub best_self: SelfHeaderId,
 	/// Best finalized header id of this chain.
 	pub best_finalized_self: SelfHeaderId,
-	/// Best finalized header id of the peer chain read at the best block of this chain (at `best_finalized_self`).
+	/// Best finalized header id of the peer chain read at the best block of this chain (at
+	/// `best_finalized_self`).
 	pub best_finalized_peer_at_best_self: PeerHeaderId,
 }
 
@@ -244,14 +250,10 @@ pub struct ClientsState<P: MessageLane> {
 	pub target: Option<TargetClientState<P>>,
 }
 
-/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop.
+/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
+/// sync loop.
 pub fn metrics_prefix<P: MessageLane>(lane: &LaneId) -> String {
-	format!(
-		"{}_to_{}_MessageLane_{}",
-		P::SOURCE_NAME,
-		P::TARGET_NAME,
-		hex::encode(lane)
-	)
+	format!("{}_to_{}_MessageLane_{}", P::SOURCE_NAME, P::TARGET_NAME, hex::encode(lane))
 }
 
 /// Run message lane service loop.
@@ -270,22 +272,20 @@ pub async fn run<P: MessageLane>(
 		.standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))?
 		.expose()
 		.await?
-		.run(
-			metrics_prefix::<P>(&params.lane),
-			move |source_client, target_client, metrics| {
-				run_until_connection_lost(
-					params.clone(),
-					source_client,
-					target_client,
-					metrics,
-					exit_signal.clone(),
-				)
-			},
-		)
+		.run(metrics_prefix::<P>(&params.lane), move |source_client, target_client, metrics| {
+			run_until_connection_lost(
+				params.clone(),
+				source_client,
+				target_client,
+				metrics,
+				exit_signal.clone(),
+			)
+		})
 		.await
 }
 
-/// Run one-way message delivery loop until connection with target or source node is lost, or exit signal is received.
+/// Run one-way message delivery loop until connection with target or source node is lost, or exit
+/// signal is received.
 async fn run_until_connection_lost<P: MessageLane, SC: SourceClient<P>, TC: TargetClient<P>>(
 	params: Params,
 	source_client: SC,
@@ -557,7 +557,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_source_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok(data.source_state.clone())
 		}
@@ -569,7 +569,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_source_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok((id, data.source_latest_generated_nonce))
 		}
@@ -609,11 +609,7 @@ pub(crate) mod tests {
 			nonces: RangeInclusive<MessageNonce>,
 			proof_parameters: MessageProofParameters,
 		) -> Result<
-			(
-				SourceHeaderIdOf<TestMessageLane>,
-				RangeInclusive<MessageNonce>,
-				TestMessagesProof,
-			),
+			(SourceHeaderIdOf<TestMessageLane>, RangeInclusive<MessageNonce>, TestMessagesProof),
 			TestError,
 		> {
 			let mut data = self.data.lock();
@@ -694,7 +690,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok(data.target_state.clone())
 		}
@@ -706,7 +702,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok((id, data.target_latest_received_nonce))
 		}
@@ -732,7 +728,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok((id, data.target_latest_confirmed_received_nonce))
 		}
@@ -753,14 +749,15 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			data.target_state.best_self =
 				HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1);
 			data.target_state.best_finalized_self = data.target_state.best_self;
 			data.target_latest_received_nonce = *proof.0.end();
 			if let Some(target_latest_confirmed_received_nonce) = proof.1 {
-				data.target_latest_confirmed_received_nonce = target_latest_confirmed_received_nonce;
+				data.target_latest_confirmed_received_nonce =
+					target_latest_confirmed_received_nonce;
 			}
 			data.submitted_messages_proofs.push(proof);
 			Ok(nonces)
@@ -780,11 +777,9 @@ pub(crate) mod tests {
 			total_dispatch_weight: Weight,
 			total_size: u32,
 		) -> Result<TestSourceChainBalance, TestError> {
-			Ok(
-				BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1)
-					+ total_dispatch_weight
-					+ total_size as TestSourceChainBalance,
-			)
+			Ok(BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1) +
+				total_dispatch_weight +
+				total_size as TestSourceChainBalance)
 		}
 	}
 
@@ -797,14 +792,8 @@ pub(crate) mod tests {
 		async_std::task::block_on(async {
 			let data = Arc::new(Mutex::new(data));
 
-			let source_client = TestSourceClient {
-				data: data.clone(),
-				tick: source_tick,
-			};
-			let target_client = TestTargetClient {
-				data: data.clone(),
-				tick: target_tick,
-			};
+			let source_client = TestSourceClient { data: data.clone(), tick: source_tick };
+			let target_client = TestTargetClient { data: data.clone(), tick: target_tick };
 			let _ = run(
 				Params {
 					lane: [0, 0, 0, 0],
@@ -907,7 +896,10 @@ pub(crate) mod tests {
 				data.source_state.best_finalized_self = data.source_state.best_self;
 				// headers relay must only be started when we need new target headers at source node
 				if data.target_to_source_header_required.is_some() {
-					assert!(data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_self.0);
+					assert!(
+						data.source_state.best_finalized_peer_at_best_self.0 <
+							data.target_state.best_self.0
+					);
 					data.target_to_source_header_required = None;
 				}
 				// syncing target headers -> source chain
@@ -924,7 +916,10 @@ pub(crate) mod tests {
 				data.target_state.best_finalized_self = data.target_state.best_self;
 				// headers relay must only be started when we need new source headers at target node
 				if data.source_to_target_header_required.is_some() {
-					assert!(data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_self.0);
+					assert!(
+						data.target_state.best_finalized_peer_at_best_self.0 <
+							data.source_state.best_self.0
+					);
 					data.source_to_target_header_required = None;
 				}
 				// syncing source headers -> target chain
diff --git a/bridges/relays/messages/src/message_race_delivery.rs b/bridges/relays/messages/src/message_race_delivery.rs
index 8a5f8d7df260dc7a2d958ce1f5a01859a302a4a5..3433b683d7a7739e9abd93c831825ae50f6c291c 100644
--- a/bridges/relays/messages/src/message_race_delivery.rs
+++ b/bridges/relays/messages/src/message_race_delivery.rs
@@ -13,18 +13,20 @@
 
 //! Message delivery race delivers proof-of-messages from "lane.source" to "lane.target".
 
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use crate::message_lane_loop::{
-	MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, RelayerMode,
-	SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient,
-	TargetClientState,
+use crate::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane_loop::{
+		MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, RelayerMode,
+		SourceClient as MessageLaneSourceClient, SourceClientState,
+		TargetClient as MessageLaneTargetClient, TargetClientState,
+	},
+	message_race_loop::{
+		MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces,
+		TargetClient, TargetClientNonces,
+	},
+	message_race_strategy::{BasicStrategy, SourceRangesQueue},
+	metrics::MessageLaneLoopMetrics,
 };
-use crate::message_race_loop::{
-	MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, TargetClient,
-	TargetClientNonces,
-};
-use crate::message_race_strategy::{BasicStrategy, SourceRangesQueue};
-use crate::metrics::MessageLaneLoopMetrics;
 
 use async_trait::async_trait;
 use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight};
@@ -66,7 +68,8 @@ pub async fn run<P: MessageLane>(
 		MessageDeliveryStrategy::<P, _, _> {
 			lane_source_client: source_client,
 			lane_target_client: target_client,
-			max_unrewarded_relayer_entries_at_target: params.max_unrewarded_relayer_entries_at_target,
+			max_unrewarded_relayer_entries_at_target: params
+				.max_unrewarded_relayer_entries_at_target,
 			max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target,
 			max_messages_in_single_batch: params.max_messages_in_single_batch,
 			max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch,
@@ -121,8 +124,10 @@ where
 		at_block: SourceHeaderIdOf<P>,
 		prev_latest_nonce: MessageNonce,
 	) -> Result<(SourceHeaderIdOf<P>, SourceClientNonces<Self::NoncesRange>), Self::Error> {
-		let (at_block, latest_generated_nonce) = self.client.latest_generated_nonce(at_block).await?;
-		let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?;
+		let (at_block, latest_generated_nonce) =
+			self.client.latest_generated_nonce(at_block).await?;
+		let (at_block, latest_confirmed_nonce) =
+			self.client.latest_confirmed_received_nonce(at_block).await?;
 
 		if let Some(metrics_msg) = self.metrics_msg.as_ref() {
 			metrics_msg.update_source_latest_generated_nonce::<P>(latest_generated_nonce);
@@ -131,7 +136,10 @@ where
 
 		let new_nonces = if latest_generated_nonce > prev_latest_nonce {
 			self.client
-				.generated_message_details(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce)
+				.generated_message_details(
+					at_block.clone(),
+					prev_latest_nonce + 1..=latest_generated_nonce,
+				)
 				.await?
 		} else {
 			MessageDetailsMap::new()
@@ -139,10 +147,7 @@ where
 
 		Ok((
 			at_block,
-			SourceClientNonces {
-				new_nonces,
-				confirmed_nonce: Some(latest_confirmed_nonce),
-			},
+			SourceClientNonces { new_nonces, confirmed_nonce: Some(latest_confirmed_nonce) },
 		))
 	}
 
@@ -151,7 +156,8 @@ where
 		at_block: SourceHeaderIdOf<P>,
 		nonces: RangeInclusive<MessageNonce>,
 		proof_parameters: Self::ProofParameters,
-	) -> Result<(SourceHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesProof), Self::Error> {
+	) -> Result<(SourceHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesProof), Self::Error>
+	{
 		self.client.prove_messages(at_block, nonces, proof_parameters).await
 	}
 }
@@ -180,10 +186,13 @@ where
 		&self,
 		at_block: TargetHeaderIdOf<P>,
 		update_metrics: bool,
-	) -> Result<(TargetHeaderIdOf<P>, TargetClientNonces<DeliveryRaceTargetNoncesData>), Self::Error> {
+	) -> Result<(TargetHeaderIdOf<P>, TargetClientNonces<DeliveryRaceTargetNoncesData>), Self::Error>
+	{
 		let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?;
-		let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?;
-		let (at_block, unrewarded_relayers) = self.client.unrewarded_relayers_state(at_block).await?;
+		let (at_block, latest_confirmed_nonce) =
+			self.client.latest_confirmed_received_nonce(at_block).await?;
+		let (at_block, unrewarded_relayers) =
+			self.client.unrewarded_relayers_state(at_block).await?;
 
 		if update_metrics {
 			if let Some(metrics_msg) = self.metrics_msg.as_ref() {
@@ -210,9 +219,7 @@ where
 		nonces: RangeInclusive<MessageNonce>,
 		proof: P::MessagesProof,
 	) -> Result<RangeInclusive<MessageNonce>, Self::Error> {
-		self.client
-			.submit_messages_proof(generated_at_block, nonces, proof)
-			.await
+		self.client.submit_messages_proof(generated_at_block, nonces, proof).await
 	}
 }
 
@@ -245,7 +252,8 @@ struct MessageDeliveryStrategy<P: MessageLane, SC, TC> {
 	max_messages_size_in_single_batch: u32,
 	/// Relayer operating mode.
 	relayer_mode: RelayerMode,
-	/// Latest confirmed nonces at the source client + the header id where we have first met this nonce.
+	/// Latest confirmed nonces at the source client + the header id where we have first met this
+	/// nonce.
 	latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf<P>, MessageNonce)>,
 	/// Target nonces from the source client.
 	target_nonces: Option<TargetClientNonces<DeliveryRaceTargetNoncesData>>,
@@ -269,23 +277,11 @@ impl<P: MessageLane, SC, TC> std::fmt::Debug for MessageDeliveryStrategy<P, SC,
 				"max_unrewarded_relayer_entries_at_target",
 				&self.max_unrewarded_relayer_entries_at_target,
 			)
-			.field(
-				"max_unconfirmed_nonces_at_target",
-				&self.max_unconfirmed_nonces_at_target,
-			)
+			.field("max_unconfirmed_nonces_at_target", &self.max_unconfirmed_nonces_at_target)
 			.field("max_messages_in_single_batch", &self.max_messages_in_single_batch)
-			.field(
-				"max_messages_weight_in_single_batch",
-				&self.max_messages_weight_in_single_batch,
-			)
-			.field(
-				"max_messages_size_in_single_batch",
-				&self.max_messages_size_in_single_batch,
-			)
-			.field(
-				"latest_confirmed_nonces_at_source",
-				&self.latest_confirmed_nonces_at_source,
-			)
+			.field("max_messages_weight_in_single_batch", &self.max_messages_weight_in_single_batch)
+			.field("max_messages_size_in_single_batch", &self.max_messages_size_in_single_batch)
+			.field("latest_confirmed_nonces_at_source", &self.latest_confirmed_nonces_at_source)
 			.field("target_nonces", &self.target_nonces)
 			.field("strategy", &self.strategy)
 			.finish()
@@ -319,8 +315,12 @@ where
 		self.strategy.is_empty()
 	}
 
-	fn required_source_header_at_target(&self, current_best: &SourceHeaderIdOf<P>) -> Option<SourceHeaderIdOf<P>> {
-		let header_required_for_messages_delivery = self.strategy.required_source_header_at_target(current_best);
+	fn required_source_header_at_target(
+		&self,
+		current_best: &SourceHeaderIdOf<P>,
+	) -> Option<SourceHeaderIdOf<P>> {
+		let header_required_for_messages_delivery =
+			self.strategy.required_source_header_at_target(current_best);
 		let header_required_for_reward_confirmations_delivery =
 			self.latest_confirmed_nonces_at_source.back().map(|(id, _)| id.clone());
 		match (
@@ -371,10 +371,7 @@ where
 		self.target_nonces = Some(target_nonces);
 
 		self.strategy.best_target_nonces_updated(
-			TargetClientNonces {
-				latest_nonce: nonces.latest_nonce,
-				nonces_data: (),
-			},
+			TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () },
 			race_state,
 		)
 	}
@@ -399,14 +396,12 @@ where
 		}
 
 		if let Some(ref mut target_nonces) = self.target_nonces {
-			target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce);
+			target_nonces.latest_nonce =
+				std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce);
 		}
 
 		self.strategy.finalized_target_nonces_updated(
-			TargetClientNonces {
-				latest_nonce: nonces.latest_nonce,
-				nonces_data: (),
-			},
+			TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () },
 			race_state,
 		)
 	}
@@ -428,12 +423,15 @@ where
 		// There's additional condition in the message delivery race: target would reject messages
 		// if there are too much unconfirmed messages at the inbound lane.
 
-		// The receiving race is responsible to deliver confirmations back to the source chain. So if
-		// there's a lot of unconfirmed messages, let's wait until it'll be able to do its job.
+		// The receiving race is responsible to deliver confirmations back to the source chain. So
+		// if there's a lot of unconfirmed messages, let's wait until it'll be able to do its job.
 		let latest_received_nonce_at_target = target_nonces.latest_nonce;
-		let confirmations_missing = latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source);
+		let confirmations_missing =
+			latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source);
 		match confirmations_missing {
-			Some(confirmations_missing) if confirmations_missing >= self.max_unconfirmed_nonces_at_target => {
+			Some(confirmations_missing)
+				if confirmations_missing >= self.max_unconfirmed_nonces_at_target =>
+			{
 				log::debug!(
 					target: "bridge",
 					"Cannot deliver any more messages from {} to {}. Too many unconfirmed nonces \
@@ -445,50 +443,55 @@ where
 					self.max_unconfirmed_nonces_at_target,
 				);
 
-				return None;
+				return None
 			}
 			_ => (),
 		}
 
-		// Ok - we may have new nonces to deliver. But target may still reject new messages, because we haven't
-		// notified it that (some) messages have been confirmed. So we may want to include updated
-		// `source.latest_confirmed` in the proof.
+		// Ok - we may have new nonces to deliver. But target may still reject new messages, because
+		// we haven't notified it that (some) messages have been confirmed. So we may want to
+		// include updated `source.latest_confirmed` in the proof.
 		//
-		// Important note: we're including outbound state lane proof whenever there are unconfirmed nonces
-		// on the target chain. Other strategy is to include it only if it's absolutely necessary.
+		// Important note: we're including outbound state lane proof whenever there are unconfirmed
+		// nonces on the target chain. Other strategy is to include it only if it's absolutely
+		// necessary.
 		let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce;
-		let outbound_state_proof_required = latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source;
+		let outbound_state_proof_required =
+			latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source;
 
 		// The target node would also reject messages if there are too many entries in the
 		// "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then
 		// we should wait for confirmations race.
 		let unrewarded_relayer_entries_limit_reached =
-			target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries
-				>= self.max_unrewarded_relayer_entries_at_target;
+			target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries >=
+				self.max_unrewarded_relayer_entries_at_target;
 		if unrewarded_relayer_entries_limit_reached {
 			// so there are already too many unrewarded relayer entries in the set
 			//
-			// => check if we can prove enough rewards. If not, we should wait for more rewards to be paid
+			// => check if we can prove enough rewards. If not, we should wait for more rewards to
+			// be paid
 			let number_of_rewards_being_proved =
 				latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target);
-			let enough_rewards_being_proved = number_of_rewards_being_proved
-				>= target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry;
+			let enough_rewards_being_proved = number_of_rewards_being_proved >=
+				target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry;
 			if !enough_rewards_being_proved {
-				return None;
+				return None
 			}
 		}
 
-		// If we're here, then the confirmations race did its job && sending side now knows that messages
-		// have been delivered. Now let's select nonces that we want to deliver.
+		// If we're here, then the confirmations race did its job && sending side now knows that
+		// messages have been delivered. Now let's select nonces that we want to deliver.
 		//
 		// We may deliver at most:
 		//
-		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_target)
+		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target -
+		// latest_confirmed_nonce_at_target)
 		//
-		// messages in the batch. But since we're including outbound state proof in the batch, then it
-		// may be increased to:
+		// messages in the batch. But since we're including outbound state proof in the batch, then
+		// it may be increased to:
 		//
-		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_source)
+		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target -
+		// latest_confirmed_nonce_at_source)
 		let future_confirmed_nonce_at_target = if outbound_state_proof_required {
 			latest_confirmed_nonce_at_source
 		} else {
@@ -505,7 +508,8 @@ where
 		let lane_source_client = self.lane_source_client.clone();
 		let lane_target_client = self.lane_target_client.clone();
 
-		let maximal_source_queue_index = self.strategy.maximal_available_source_queue_index(race_state)?;
+		let maximal_source_queue_index =
+			self.strategy.maximal_available_source_queue_index(race_state)?;
 		let previous_total_dispatch_weight = self.total_queued_dispatch_weight();
 		let source_queue = self.strategy.source_queue();
 		let range_end = select_nonces_for_delivery_transaction(
@@ -529,10 +533,7 @@ where
 
 		Some((
 			selected_nonces,
-			MessageProofParameters {
-				outbound_state_proof_required,
-				dispatch_weight,
-			},
+			MessageProofParameters { outbound_state_proof_required, dispatch_weight },
 		))
 	}
 }
@@ -595,9 +596,9 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 
 		// limit messages in the batch by weight
 		let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) {
-			Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => {
-				new_selected_weight
-			}
+			Some(new_selected_weight)
+				if new_selected_weight <= max_messages_weight_in_single_batch =>
+				new_selected_weight,
 			new_selected_weight if selected_count == 0 => {
 				log::warn!(
 					target: "bridge",
@@ -607,13 +608,14 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 					max_messages_weight_in_single_batch,
 				);
 				new_selected_weight.unwrap_or(Weight::MAX)
-			}
+			},
 			_ => break,
 		};
 
 		// limit messages in the batch by size
 		let new_selected_size = match selected_size.checked_add(details.size) {
-			Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => new_selected_size,
+			Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch =>
+				new_selected_size,
 			new_selected_size if selected_count == 0 => {
 				log::warn!(
 					target: "bridge",
@@ -623,14 +625,14 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 					max_messages_size_in_single_batch,
 				);
 				new_selected_size.unwrap_or(u32::MAX)
-			}
+			},
 			_ => break,
 		};
 
 		// limit number of messages in the batch
 		let new_selected_count = selected_count + 1;
 		if new_selected_count > max_messages_in_this_batch {
-			break;
+			break
 		}
 
 		// If dispatch fee has been paid at the source chain, it means that it is **relayer** who's
@@ -639,13 +641,14 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 		// If dispatch fee is paid at the target chain, it means that it'll be withdrawn from the
 		// dispatch origin account AND reward is not covering this fee.
 		//
-		// So in the latter case we're not adding the dispatch weight to the delivery transaction weight.
+		// So in the latter case we're not adding the dispatch weight to the delivery transaction
+		// weight.
 		let mut new_selected_prepaid_nonces = selected_prepaid_nonces;
 		let new_selected_unpaid_weight = match details.dispatch_fee_payment {
 			DispatchFeePayment::AtSourceChain => {
 				new_selected_prepaid_nonces += 1;
 				selected_unpaid_weight.saturating_add(details.dispatch_weight)
-			}
+			},
 			DispatchFeePayment::AtTargetChain => selected_unpaid_weight,
 		};
 
@@ -654,11 +657,12 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 		match relayer_mode {
 			RelayerMode::Altruistic => {
 				soft_selected_count = index + 1;
-			}
+			},
 			RelayerMode::Rational => {
 				let delivery_transaction_cost = lane_target_client
 					.estimate_delivery_transaction_in_source_tokens(
-						hard_selected_begin_nonce..=(hard_selected_begin_nonce + index as MessageNonce),
+						hard_selected_begin_nonce..=
+							(hard_selected_begin_nonce + index as MessageNonce),
 						new_selected_prepaid_nonces,
 						new_selected_unpaid_weight,
 						new_selected_size as u32,
@@ -678,7 +682,8 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 				let is_total_reward_less_than_cost = total_reward < total_cost;
 				let prev_total_cost = total_cost;
 				let prev_total_reward = total_reward;
-				total_confirmations_cost = total_confirmations_cost.saturating_add(&confirmation_transaction_cost);
+				total_confirmations_cost =
+					total_confirmations_cost.saturating_add(&confirmation_transaction_cost);
 				total_reward = total_reward.saturating_add(&details.reward);
 				total_cost = total_confirmations_cost.saturating_add(&delivery_transaction_cost);
 				if !is_total_reward_less_than_cost && total_reward < total_cost {
@@ -713,7 +718,7 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 					selected_reward = total_reward;
 					selected_cost = total_cost;
 				}
-			}
+			},
 		}
 
 		hard_selected_count = index + 1;
@@ -725,9 +730,11 @@ async fn select_nonces_for_delivery_transaction<P: MessageLane>(
 	}
 
 	if hard_selected_count != soft_selected_count {
-		let hard_selected_end_nonce = hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1;
+		let hard_selected_end_nonce =
+			hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1;
 		let soft_selected_begin_nonce = hard_selected_begin_nonce;
-		let soft_selected_end_nonce = soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1;
+		let soft_selected_end_nonce =
+			soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1;
 		log::warn!(
 			target: "bridge",
 			"Relayer may deliver nonces [{:?}; {:?}], but because of its strategy ({:?}) it has selected \
@@ -785,9 +792,9 @@ mod tests {
 	use super::*;
 	use crate::message_lane_loop::{
 		tests::{
-			header_id, TestMessageLane, TestMessagesProof, TestSourceChainBalance, TestSourceClient,
-			TestSourceHeaderId, TestTargetClient, TestTargetHeaderId, BASE_MESSAGE_DELIVERY_TRANSACTION_COST,
-			CONFIRMATION_TRANSACTION_COST,
+			header_id, TestMessageLane, TestMessagesProof, TestSourceChainBalance,
+			TestSourceClient, TestSourceHeaderId, TestTargetClient, TestTargetHeaderId,
+			BASE_MESSAGE_DELIVERY_TRANSACTION_COST, CONFIRMATION_TRANSACTION_COST,
 		},
 		MessageDetails,
 	};
@@ -795,13 +802,14 @@ mod tests {
 
 	const DEFAULT_DISPATCH_WEIGHT: Weight = 1;
 	const DEFAULT_SIZE: u32 = 1;
-	const DEFAULT_REWARD: TestSourceChainBalance = CONFIRMATION_TRANSACTION_COST
-		+ BASE_MESSAGE_DELIVERY_TRANSACTION_COST
-		+ DEFAULT_DISPATCH_WEIGHT
-		+ (DEFAULT_SIZE as TestSourceChainBalance);
+	const DEFAULT_REWARD: TestSourceChainBalance = CONFIRMATION_TRANSACTION_COST +
+		BASE_MESSAGE_DELIVERY_TRANSACTION_COST +
+		DEFAULT_DISPATCH_WEIGHT +
+		(DEFAULT_SIZE as TestSourceChainBalance);
 
 	type TestRaceState = RaceState<TestSourceHeaderId, TestTargetHeaderId, TestMessagesProof>;
-	type TestStrategy = MessageDeliveryStrategy<TestMessageLane, TestSourceClient, TestTargetClient>;
+	type TestStrategy =
+		MessageDeliveryStrategy<TestMessageLane, TestSourceClient, TestTargetClient>;
 
 	fn source_nonces(
 		new_nonces: RangeInclusive<MessageNonce>,
@@ -863,14 +871,12 @@ mod tests {
 			strategy: BasicStrategy::new(),
 		};
 
-		race_strategy
-			.strategy
-			.source_nonces_updated(header_id(1), source_nonces(20..=23, 19, DEFAULT_REWARD, AtSourceChain));
+		race_strategy.strategy.source_nonces_updated(
+			header_id(1),
+			source_nonces(20..=23, 19, DEFAULT_REWARD, AtSourceChain),
+		);
 
-		let target_nonces = TargetClientNonces {
-			latest_nonce: 19,
-			nonces_data: (),
-		};
+		let target_nonces = TargetClientNonces { latest_nonce: 19, nonces_data: () };
 		race_strategy
 			.strategy
 			.best_target_nonces_updated(target_nonces.clone(), &mut race_state);
@@ -890,7 +896,9 @@ mod tests {
 
 	#[test]
 	fn weights_map_works_as_nonces_range() {
-		fn build_map(range: RangeInclusive<MessageNonce>) -> MessageDetailsMap<TestSourceChainBalance> {
+		fn build_map(
+			range: RangeInclusive<MessageNonce>,
+		) -> MessageDetailsMap<TestSourceChainBalance> {
 			range
 				.map(|idx| {
 					(
@@ -937,7 +945,8 @@ mod tests {
 		// we need to wait until confirmations will be delivered by receiving race
 		strategy.latest_confirmed_nonces_at_source = vec![(
 			header_id(1),
-			strategy.target_nonces.as_ref().unwrap().latest_nonce - strategy.max_unconfirmed_nonces_at_target,
+			strategy.target_nonces.as_ref().unwrap().latest_nonce -
+				strategy.max_unconfirmed_nonces_at_target,
 		)]
 		.into_iter()
 		.collect();
@@ -945,13 +954,16 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() {
+	async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available()
+	{
 		let (state, mut strategy) = prepare_strategy();
 
 		// if there are new confirmed nonces on source, we want to relay this information
 		// to target to prune rewards queue
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=23), proof_parameters(true, 4)))
@@ -965,8 +977,10 @@ mod tests {
 		// if there are already `max_unrewarded_relayer_entries_at_target` entries at target,
 		// we need to wait until rewards will be paid
 		{
-			let mut unrewarded_relayers = &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers;
-			unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target;
+			let mut unrewarded_relayers =
+				&mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers;
+			unrewarded_relayers.unrewarded_relayer_entries =
+				strategy.max_unrewarded_relayer_entries_at_target;
 			unrewarded_relayers.messages_in_oldest_entry = 4;
 		}
 		assert_eq!(strategy.select_nonces_to_deliver(state).await, None);
@@ -979,12 +993,14 @@ mod tests {
 
 		// if there are already `max_unrewarded_relayer_entries_at_target` entries at target,
 		// we need to prove at least `messages_in_oldest_entry` rewards
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		{
 			let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data;
 			nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
 			let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers;
-			unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target;
+			unrewarded_relayers.unrewarded_relayer_entries =
+				strategy.max_unrewarded_relayer_entries_at_target;
 			unrewarded_relayers.messages_in_oldest_entry = 4;
 		}
 		assert_eq!(strategy.select_nonces_to_deliver(state).await, None);
@@ -996,12 +1012,14 @@ mod tests {
 
 		// if there are already `max_unrewarded_relayer_entries_at_target` entries at target,
 		// we need to prove at least `messages_in_oldest_entry` rewards
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		{
 			let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data;
 			nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3;
 			let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers;
-			unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target;
+			unrewarded_relayers.unrewarded_relayer_entries =
+				strategy.max_unrewarded_relayer_entries_at_target;
 			unrewarded_relayers.messages_in_oldest_entry = 3;
 		}
 		assert_eq!(
@@ -1023,15 +1041,13 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() {
+	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight(
+	) {
 		let (state, mut strategy) = prepare_strategy();
 
-		// first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4)
-		strategy.strategy.source_queue_mut()[0]
-			.1
-			.get_mut(&20)
-			.unwrap()
-			.dispatch_weight = 10;
+		// first message doesn't fit in the batch, because it has weight (10) that overflows max
+		// weight (4)
+		strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().dispatch_weight = 10;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=20), proof_parameters(false, 10)))
@@ -1051,10 +1067,12 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() {
+	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size(
+	) {
 		let (state, mut strategy) = prepare_strategy();
 
-		// first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4)
+		// first message doesn't fit in the batch, because it has weight (10) that overflows max
+		// weight (4)
 		strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
@@ -1066,7 +1084,8 @@ mod tests {
 	async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() {
 		let (state, mut strategy) = prepare_strategy();
 
-		// not all queued messages may fit in the batch, because batch has max number of messages limit
+		// not all queued messages may fit in the batch, because batch has max number of messages
+		// limit
 		strategy.max_messages_in_single_batch = 3;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
@@ -1075,16 +1094,18 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() {
+	async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces(
+	) {
 		let (state, mut strategy) = prepare_strategy();
 
 		// 1 delivery confirmation from target to source is still missing, so we may only
 		// relay 3 new messages
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
-		strategy.latest_confirmed_nonces_at_source = vec![(header_id(1), prev_confirmed_nonce_at_source - 1)]
-			.into_iter()
-			.collect();
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		strategy.latest_confirmed_nonces_at_source =
+			vec![(header_id(1), prev_confirmed_nonce_at_source - 1)].into_iter().collect();
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=22), proof_parameters(false, 3)))
@@ -1099,30 +1120,35 @@ mod tests {
 		//
 		// => so we can't deliver more than 3 messages
 		let (mut state, mut strategy) = prepare_strategy();
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		strategy.latest_confirmed_nonces_at_source = vec![
 			(header_id(1), prev_confirmed_nonce_at_source - 1),
 			(header_id(2), prev_confirmed_nonce_at_source),
 		]
 		.into_iter()
 		.collect();
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(1));
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=22), proof_parameters(false, 3)))
 		);
 
-		// the same situation, but the header 2 is known to the target node, so we may deliver reward confirmation
+		// the same situation, but the header 2 is known to the target node, so we may deliver
+		// reward confirmation
 		let (mut state, mut strategy) = prepare_strategy();
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		strategy.latest_confirmed_nonces_at_source = vec![
 			(header_id(1), prev_confirmed_nonce_at_source - 1),
 			(header_id(2), prev_confirmed_nonce_at_source),
 		]
 		.into_iter()
 		.collect();
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		state.best_finalized_source_header_id_at_source = Some(header_id(2));
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(2));
 		assert_eq!(
@@ -1136,8 +1162,9 @@ mod tests {
 		// let's prepare situation when:
 		// - all messages [20; 23] have been generated at source block#1;
 		let (mut state, mut strategy) = prepare_strategy();
-		// - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because of unrewarded
-		//   relayers vector capacity;
+		//
+		// - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because
+		//   of unrewarded relayers vector capacity;
 		strategy.max_unconfirmed_nonces_at_target = 2;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state.clone()).await,
@@ -1158,19 +1185,15 @@ mod tests {
 			&mut state,
 		);
 		assert_eq!(strategy.select_nonces_to_deliver(state).await, None);
+		//
 		// - messages [1; 10] receiving confirmation has been delivered at source block#2;
 		strategy.source_nonces_updated(
 			header_id(2),
-			SourceClientNonces {
-				new_nonces: MessageDetailsMap::new(),
-				confirmed_nonce: Some(21),
-			},
+			SourceClientNonces { new_nonces: MessageDetailsMap::new(), confirmed_nonce: Some(21) },
 		);
+		//
 		// - so now we'll need to relay source block#11 to be able to accept messages [11; 20].
-		assert_eq!(
-			strategy.required_source_header_at_target(&header_id(1)),
-			Some(header_id(2))
-		);
+		assert_eq!(strategy.required_source_header_at_target(&header_id(1)), Some(header_id(2)));
 	}
 
 	#[async_std::test]
@@ -1233,8 +1256,8 @@ mod tests {
 
 			// so now we have:
 			// - 20..=23 with reward = cost
-			// - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT` from the
-			//   cost, so it should be fine;
+			// - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT`
+			//   from the cost, so it should be fine;
 			// => when MSG#24 fee is paid at the target chain, strategy shall select all 20..=24
 			// => when MSG#25 fee is paid at the source chain, strategy shall only select 20..=23
 			strategy.select_nonces_to_deliver(state).await
@@ -1255,11 +1278,11 @@ mod tests {
 		// Real scenario that has happened on test deployments:
 		// 1) relayer witnessed M1 at block 1 => it has separate entry in the `source_queue`
 		// 2) relayer witnessed M2 at block 2 => it has separate entry in the `source_queue`
-		// 3) if block 2 is known to the target node, then both M1 and M2 are selected for single delivery,
-		//    even though weight(M1+M2) > larger than largest allowed weight
+		// 3) if block 2 is known to the target node, then both M1 and M2 are selected for single
+		// delivery,    even though weight(M1+M2) > larger than largest allowed weight
 		//
-		// This was happening because selector (`select_nonces_for_delivery_transaction`) has been called
-		// for every `source_queue` entry separately without preserving any context.
+		// This was happening because selector (`select_nonces_for_delivery_transaction`) has been
+		// called for every `source_queue` entry separately without preserving any context.
 		let (mut state, mut strategy) = prepare_strategy();
 		let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, AtSourceChain);
 		strategy.strategy.source_nonces_updated(header_id(2), nonces);
diff --git a/bridges/relays/messages/src/message_race_loop.rs b/bridges/relays/messages/src/message_race_loop.rs
index 399f8719804979e2092996258aa2ca497006390b..a7254f70ee4a472757bf0a77eef4cf82fb8924c3 100644
--- a/bridges/relays/messages/src/message_race_loop.rs
+++ b/bridges/relays/messages/src/message_race_loop.rs
@@ -54,10 +54,12 @@ pub trait MessageRace {
 }
 
 /// State of race source client.
-type SourceClientState<P> = ClientState<<P as MessageRace>::SourceHeaderId, <P as MessageRace>::TargetHeaderId>;
+type SourceClientState<P> =
+	ClientState<<P as MessageRace>::SourceHeaderId, <P as MessageRace>::TargetHeaderId>;
 
 /// State of race target client.
-type TargetClientState<P> = ClientState<<P as MessageRace>::TargetHeaderId, <P as MessageRace>::SourceHeaderId>;
+type TargetClientState<P> =
+	ClientState<<P as MessageRace>::TargetHeaderId, <P as MessageRace>::SourceHeaderId>;
 
 /// Inclusive nonces range.
 pub trait NoncesRange: Debug + Sized {
@@ -155,7 +157,10 @@ pub trait RaceStrategy<SourceHeaderId, TargetHeaderId, Proof>: Debug {
 	/// Should return true if nothing has to be synced.
 	fn is_empty(&self) -> bool;
 	/// Return id of source header that is required to be on target to continue synchronization.
-	fn required_source_header_at_target(&self, current_best: &SourceHeaderId) -> Option<SourceHeaderId>;
+	fn required_source_header_at_target(
+		&self,
+		current_best: &SourceHeaderId,
+	) -> Option<SourceHeaderId>;
 	/// Return the best nonce at source node.
 	///
 	/// `Some` is returned only if we are sure that the value is greater or equal
@@ -167,7 +172,11 @@ pub trait RaceStrategy<SourceHeaderId, TargetHeaderId, Proof>: Debug {
 	fn best_at_target(&self) -> Option<MessageNonce>;
 
 	/// Called when nonces are updated at source node of the race.
-	fn source_nonces_updated(&mut self, at_block: SourceHeaderId, nonces: SourceClientNonces<Self::SourceNoncesRange>);
+	fn source_nonces_updated(
+		&mut self,
+		at_block: SourceHeaderId,
+		nonces: SourceClientNonces<Self::SourceNoncesRange>,
+	);
 	/// Called when best nonces are updated at target node of the race.
 	fn best_target_nonces_updated(
 		&mut self,
@@ -430,8 +439,10 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 				strategy,
 			);
 
-			return Err(FailedClient::Both);
-		} else if race_state.nonces_to_submit.is_none() && race_state.nonces_submitted.is_none() && strategy.is_empty()
+			return Err(FailedClient::Both)
+		} else if race_state.nonces_to_submit.is_none() &&
+			race_state.nonces_submitted.is_none() &&
+			strategy.is_empty()
 		{
 			stall_countdown = Instant::now();
 		}
@@ -439,7 +450,8 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 		if source_client_is_online {
 			source_client_is_online = false;
 
-			let nonces_to_deliver = select_nonces_to_deliver(race_state.clone(), &mut strategy).await;
+			let nonces_to_deliver =
+				select_nonces_to_deliver(race_state.clone(), &mut strategy).await;
 			let best_at_source = strategy.best_at_source();
 
 			if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver {
@@ -451,9 +463,7 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 					at_block,
 				);
 				source_generate_proof.set(
-					race_source
-						.generate_proof(at_block, nonces_range, proof_parameters)
-						.fuse(),
+					race_source.generate_proof(at_block, nonces_range, proof_parameters).fuse(),
 				);
 			} else if source_nonces_required && best_at_source.is_some() {
 				log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name());
@@ -516,7 +526,9 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 	}
 }
 
-impl<SourceHeaderId, TargetHeaderId, Proof> Default for RaceState<SourceHeaderId, TargetHeaderId, Proof> {
+impl<SourceHeaderId, TargetHeaderId, Proof> Default
+	for RaceState<SourceHeaderId, TargetHeaderId, Proof>
+{
 	fn default() -> Self {
 		RaceState {
 			best_finalized_source_header_id_at_source: None,
@@ -539,7 +551,7 @@ where
 
 	let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10);
 	if !need_update {
-		return prev_time;
+		return prev_time
 	}
 
 	let now_best_nonce_at_source = strategy.best_at_source();
@@ -569,11 +581,7 @@ where
 		.select_nonces_to_deliver(race_state)
 		.await
 		.map(|(nonces_range, proof_parameters)| {
-			(
-				best_finalized_source_header_id_at_best_target,
-				nonces_range,
-				proof_parameters,
-			)
+			(best_finalized_source_header_id_at_best_target, nonces_range, proof_parameters)
 		})
 }
 
@@ -592,8 +600,14 @@ mod tests {
 		// target node only knows about source' BEST_AT_TARGET block
 		// source node has BEST_AT_SOURCE > BEST_AT_TARGET block
 		let mut race_state = RaceState::<_, _, ()> {
-			best_finalized_source_header_id_at_source: Some(HeaderId(BEST_AT_SOURCE, BEST_AT_SOURCE)),
-			best_finalized_source_header_id_at_best_target: Some(HeaderId(BEST_AT_TARGET, BEST_AT_TARGET)),
+			best_finalized_source_header_id_at_source: Some(HeaderId(
+				BEST_AT_SOURCE,
+				BEST_AT_SOURCE,
+			)),
+			best_finalized_source_header_id_at_best_target: Some(HeaderId(
+				BEST_AT_TARGET,
+				BEST_AT_TARGET,
+			)),
 			best_target_header_id: Some(HeaderId(0, 0)),
 			best_finalized_target_header_id: Some(HeaderId(0, 0)),
 			nonces_to_submit: None,
@@ -604,16 +618,10 @@ mod tests {
 		let mut strategy = BasicStrategy::new();
 		strategy.source_nonces_updated(
 			HeaderId(GENERATED_AT, GENERATED_AT),
-			SourceClientNonces {
-				new_nonces: 0..=10,
-				confirmed_nonce: None,
-			},
+			SourceClientNonces { new_nonces: 0..=10, confirmed_nonce: None },
 		);
 		strategy.best_target_nonces_updated(
-			TargetClientNonces {
-				latest_nonce: 5u64,
-				nonces_data: (),
-			},
+			TargetClientNonces { latest_nonce: 5u64, nonces_data: () },
 			&mut race_state,
 		);
 
diff --git a/bridges/relays/messages/src/message_race_receiving.rs b/bridges/relays/messages/src/message_race_receiving.rs
index f9ac61352fbd1723c511dc8392f6adbcdd898a2c..5aa36cbd9c6dcf76fe86c1c70479ab5deb55deb6 100644
--- a/bridges/relays/messages/src/message_race_receiving.rs
+++ b/bridges/relays/messages/src/message_race_receiving.rs
@@ -13,16 +13,19 @@
 
 //! Message receiving race delivers proof-of-messages-delivery from "lane.target" to "lane.source".
 
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use crate::message_lane_loop::{
-	SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient,
-	TargetClientState,
+use crate::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane_loop::{
+		SourceClient as MessageLaneSourceClient, SourceClientState,
+		TargetClient as MessageLaneTargetClient, TargetClientState,
+	},
+	message_race_loop::{
+		MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient,
+		TargetClientNonces,
+	},
+	message_race_strategy::BasicStrategy,
+	metrics::MessageLaneLoopMetrics,
 };
-use crate::message_race_loop::{
-	MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, TargetClientNonces,
-};
-use crate::message_race_strategy::BasicStrategy;
-use crate::metrics::MessageLaneLoopMetrics;
 
 use async_trait::async_trait;
 use bp_messages::MessageNonce;
@@ -129,11 +132,7 @@ where
 		nonces: RangeInclusive<MessageNonce>,
 		_proof_parameters: Self::ProofParameters,
 	) -> Result<
-		(
-			TargetHeaderIdOf<P>,
-			RangeInclusive<MessageNonce>,
-			P::MessagesReceivingProof,
-		),
+		(TargetHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesReceivingProof),
 		Self::Error,
 	> {
 		self.client
@@ -168,19 +167,14 @@ where
 		at_block: SourceHeaderIdOf<P>,
 		update_metrics: bool,
 	) -> Result<(SourceHeaderIdOf<P>, TargetClientNonces<()>), Self::Error> {
-		let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?;
+		let (at_block, latest_confirmed_nonce) =
+			self.client.latest_confirmed_received_nonce(at_block).await?;
 		if update_metrics {
 			if let Some(metrics_msg) = self.metrics_msg.as_ref() {
 				metrics_msg.update_source_latest_confirmed_nonce::<P>(latest_confirmed_nonce);
 			}
 		}
-		Ok((
-			at_block,
-			TargetClientNonces {
-				latest_nonce: latest_confirmed_nonce,
-				nonces_data: (),
-			},
-		))
+		Ok((at_block, TargetClientNonces { latest_nonce: latest_confirmed_nonce, nonces_data: () }))
 	}
 
 	async fn submit_proof(
@@ -189,9 +183,7 @@ where
 		nonces: RangeInclusive<MessageNonce>,
 		proof: P::MessagesReceivingProof,
 	) -> Result<RangeInclusive<MessageNonce>, Self::Error> {
-		self.client
-			.submit_messages_receiving_proof(generated_at_block, proof)
-			.await?;
+		self.client.submit_messages_receiving_proof(generated_at_block, proof).await?;
 		Ok(nonces)
 	}
 }
diff --git a/bridges/relays/messages/src/message_race_strategy.rs b/bridges/relays/messages/src/message_race_strategy.rs
index bc04ad5bcd9bccaeb652f3d7aba78b87ecd7925e..9b9091b979f66d9bcc9ccde5be1a5b38c04e5a8a 100644
--- a/bridges/relays/messages/src/message_race_strategy.rs
+++ b/bridges/relays/messages/src/message_race_strategy.rs
@@ -17,7 +17,9 @@
 //! 2) new nonces may be proved to target node (i.e. they have appeared at the
 //!    block, which is known to the target node).
 
-use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces};
+use crate::message_race_loop::{
+	NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces,
+};
 
 use async_trait::async_trait;
 use bp_messages::MessageNonce;
@@ -40,15 +42,29 @@ pub struct BasicStrategy<
 > {
 	/// All queued nonces.
 	source_queue: SourceRangesQueue<SourceHeaderHash, SourceHeaderNumber, SourceNoncesRange>,
-	/// The best nonce known to target node (at its best block). `None` if it has not been received yet.
+	/// The best nonce known to target node (at its best block). `None` if it has not been received
+	/// yet.
 	best_target_nonce: Option<MessageNonce>,
 	/// Unused generic types dump.
 	_phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>,
 }
 
-impl<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-	BasicStrategy<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-where
+impl<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	>
+	BasicStrategy<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	> where
 	SourceHeaderHash: Clone,
 	SourceHeaderNumber: Clone + Ord,
 	SourceNoncesRange: NoncesRange,
@@ -79,9 +95,9 @@ where
 
 	/// Returns index of the latest source queue entry, that may be delivered to the target node.
 	///
-	/// Returns `None` if no entries may be delivered. All entries before and including the `Some(_)`
-	/// index are guaranteed to be witnessed at source blocks that are known to be finalized at the
-	/// target node.
+	/// Returns `None` if no entries may be delivered. All entries before and including the
+	/// `Some(_)` index are guaranteed to be witnessed at source blocks that are known to be
+	/// finalized at the target node.
 	pub fn maximal_available_source_queue_index(
 		&self,
 		race_state: RaceState<
@@ -95,12 +111,12 @@ where
 
 		// if we have already selected nonces that we want to submit, do nothing
 		if race_state.nonces_to_submit.is_some() {
-			return None;
+			return None
 		}
 
 		// if we already submitted some nonces, do nothing
 		if race_state.nonces_submitted.is_some() {
-			return None;
+			return None
 		}
 
 		// 1) we want to deliver all nonces, starting from `target_nonce + 1`
@@ -124,17 +140,34 @@ where
 		while let Some((queued_at, queued_range)) = self.source_queue.pop_front() {
 			if let Some(range_to_requeue) = queued_range.greater_than(nonce) {
 				self.source_queue.push_front((queued_at, range_to_requeue));
-				break;
+				break
 			}
 		}
 	}
 }
 
 #[async_trait]
-impl<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-	RaceStrategy<HeaderId<SourceHeaderHash, SourceHeaderNumber>, HeaderId<TargetHeaderHash, TargetHeaderNumber>, Proof>
-	for BasicStrategy<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-where
+impl<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	>
+	RaceStrategy<
+		HeaderId<SourceHeaderHash, SourceHeaderNumber>,
+		HeaderId<TargetHeaderHash, TargetHeaderNumber>,
+		Proof,
+	>
+	for BasicStrategy<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	> where
 	SourceHeaderHash: Clone + Debug + Send,
 	SourceHeaderNumber: Clone + Ord + Debug + Send,
 	SourceNoncesRange: NoncesRange + Debug + Send,
@@ -162,7 +195,8 @@ where
 	fn best_at_source(&self) -> Option<MessageNonce> {
 		let best_in_queue = self.source_queue.back().map(|(_, range)| range.end());
 		match (best_in_queue, self.best_target_nonce) {
-			(Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => Some(best_in_queue),
+			(Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce =>
+				Some(best_in_queue),
 			(_, Some(best_target_nonce)) => Some(best_target_nonce),
 			(_, None) => None,
 		}
@@ -205,18 +239,17 @@ where
 
 		if let Some(best_target_nonce) = self.best_target_nonce {
 			if nonce < best_target_nonce {
-				return;
+				return
 			}
 		}
 
 		while let Some(true) = self.source_queue.front().map(|(_, range)| range.begin() <= nonce) {
-			let maybe_subrange = self
-				.source_queue
-				.pop_front()
-				.and_then(|(at_block, range)| range.greater_than(nonce).map(|subrange| (at_block, subrange)));
+			let maybe_subrange = self.source_queue.pop_front().and_then(|(at_block, range)| {
+				range.greater_than(nonce).map(|subrange| (at_block, subrange))
+			});
 			if let Some((at_block, subrange)) = maybe_subrange {
 				self.source_queue.push_front((at_block, subrange));
-				break;
+				break
 			}
 		}
 
@@ -238,10 +271,8 @@ where
 			race_state.nonces_submitted = None;
 		}
 
-		self.best_target_nonce = Some(std::cmp::max(
-			self.best_target_nonce.unwrap_or(nonces.latest_nonce),
-			nonce,
-		));
+		self.best_target_nonce =
+			Some(std::cmp::max(self.best_target_nonce.unwrap_or(nonces.latest_nonce), nonce));
 	}
 
 	fn finalized_target_nonces_updated(
@@ -278,9 +309,12 @@ where
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::message_lane::MessageLane;
-	use crate::message_lane_loop::tests::{
-		header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, TestSourceHeaderNumber,
+	use crate::{
+		message_lane::MessageLane,
+		message_lane_loop::tests::{
+			header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash,
+			TestSourceHeaderNumber,
+		},
 	};
 
 	type SourceNoncesRange = RangeInclusive<MessageNonce>;
@@ -295,17 +329,11 @@ mod tests {
 	>;
 
 	fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces<SourceNoncesRange> {
-		SourceClientNonces {
-			new_nonces,
-			confirmed_nonce: None,
-		}
+		SourceClientNonces { new_nonces, confirmed_nonce: None }
 	}
 
 	fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> {
-		TargetClientNonces {
-			latest_nonce,
-			nonces_data: (),
-		}
+		TargetClientNonces { latest_nonce, nonces_data: () }
 	}
 
 	#[test]
@@ -420,18 +448,12 @@ mod tests {
 		strategy.source_nonces_updated(header_id(5), source_nonces(7..=8));
 
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(4));
-		assert_eq!(
-			strategy.select_nonces_to_deliver(state.clone()).await,
-			Some((1..=6, ()))
-		);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=6, ())));
 		strategy.best_target_nonces_updated(target_nonces(6), &mut state);
 		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(5));
-		assert_eq!(
-			strategy.select_nonces_to_deliver(state.clone()).await,
-			Some((7..=8, ()))
-		);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((7..=8, ())));
 		strategy.best_target_nonces_updated(target_nonces(8), &mut state);
 		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 	}
@@ -471,16 +493,17 @@ mod tests {
 		strategy.source_nonces_updated(header_id(3), source_nonces(7..=9));
 
 		fn source_queue_nonces(
-			source_queue: &SourceRangesQueue<TestSourceHeaderHash, TestSourceHeaderNumber, SourceNoncesRange>,
+			source_queue: &SourceRangesQueue<
+				TestSourceHeaderHash,
+				TestSourceHeaderNumber,
+				SourceNoncesRange,
+			>,
 		) -> Vec<MessageNonce> {
 			source_queue.iter().flat_map(|(_, range)| range.clone()).collect()
 		}
 
 		strategy.remove_le_nonces_from_source_queue(1);
-		assert_eq!(
-			source_queue_nonces(&strategy.source_queue),
-			vec![2, 3, 4, 5, 6, 7, 8, 9],
-		);
+		assert_eq!(source_queue_nonces(&strategy.source_queue), vec![2, 3, 4, 5, 6, 7, 8, 9],);
 
 		strategy.remove_le_nonces_from_source_queue(5);
 		assert_eq!(source_queue_nonces(&strategy.source_queue), vec![6, 7, 8, 9],);
diff --git a/bridges/relays/messages/src/metrics.rs b/bridges/relays/messages/src/metrics.rs
index 8f115170523b4be3f510f33f6f3ae730a14301e3..8d6e480722e66b6f04bcdc95368932b154a5fdac 100644
--- a/bridges/relays/messages/src/metrics.rs
+++ b/bridges/relays/messages/src/metrics.rs
@@ -16,8 +16,10 @@
 
 //! Metrics for message lane relay loop.
 
-use crate::message_lane::MessageLane;
-use crate::message_lane_loop::{SourceClientState, TargetClientState};
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{SourceClientState, TargetClientState},
+};
 
 use bp_messages::MessageNonce;
 use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64};
@@ -81,28 +83,40 @@ impl MessageLaneLoopMetrics {
 	}
 
 	/// Update latest generated nonce at source.
-	pub fn update_source_latest_generated_nonce<P: MessageLane>(&self, source_latest_generated_nonce: MessageNonce) {
+	pub fn update_source_latest_generated_nonce<P: MessageLane>(
+		&self,
+		source_latest_generated_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["source_latest_generated"])
 			.set(source_latest_generated_nonce);
 	}
 
 	/// Update the latest confirmed nonce at source.
-	pub fn update_source_latest_confirmed_nonce<P: MessageLane>(&self, source_latest_confirmed_nonce: MessageNonce) {
+	pub fn update_source_latest_confirmed_nonce<P: MessageLane>(
+		&self,
+		source_latest_confirmed_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["source_latest_confirmed"])
 			.set(source_latest_confirmed_nonce);
 	}
 
 	/// Update the latest received nonce at target.
-	pub fn update_target_latest_received_nonce<P: MessageLane>(&self, target_latest_generated_nonce: MessageNonce) {
+	pub fn update_target_latest_received_nonce<P: MessageLane>(
+		&self,
+		target_latest_generated_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["target_latest_received"])
 			.set(target_latest_generated_nonce);
 	}
 
 	/// Update the latest confirmed nonce at target.
-	pub fn update_target_latest_confirmed_nonce<P: MessageLane>(&self, target_latest_confirmed_nonce: MessageNonce) {
+	pub fn update_target_latest_confirmed_nonce<P: MessageLane>(
+		&self,
+		target_latest_confirmed_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["target_latest_confirmed"])
 			.set(target_latest_confirmed_nonce);
diff --git a/bridges/relays/utils/src/initialize.rs b/bridges/relays/utils/src/initialize.rs
index b87937923bd4e0b70be329cb09190076fbc4a2e2..8c13a4d61cb3a5bc4062cf2ed1373bdb580fada1 100644
--- a/bridges/relays/utils/src/initialize.rs
+++ b/bridges/relays/utils/src/initialize.rs
@@ -62,14 +62,7 @@ pub fn initialize_logger(with_timestamp: bool) {
 			let log_level = color_level(record.level());
 			let log_target = color_target(record.target());
 
-			writeln!(
-				buf,
-				"{}{} {} {}",
-				loop_name_prefix(),
-				log_level,
-				log_target,
-				record.args(),
-			)
+			writeln!(buf, "{}{} {} {}", loop_name_prefix(), log_level, log_target, record.args(),)
 		});
 	}
 
@@ -81,12 +74,14 @@ pub(crate) fn initialize_loop(loop_name: String) {
 	LOOP_NAME.with(|g_loop_name| *g_loop_name.borrow_mut() = loop_name);
 }
 
-/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop` call.
+/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop`
+/// call.
 fn loop_name_prefix() -> String {
 	// try_with to avoid panic outside of async-std task context
 	LOOP_NAME
 		.try_with(|loop_name| {
-			// using borrow is ok here, because loop is only initialized once (=> borrow_mut will only be called once)
+			// using borrow is ok here, because loop is only initialized once (=> borrow_mut will
+			// only be called once)
 			let loop_name = loop_name.borrow();
 			if loop_name.is_empty() {
 				String::new()
diff --git a/bridges/relays/utils/src/lib.rs b/bridges/relays/utils/src/lib.rs
index 446e00cd23e672e95d6cdb978c454fb1b1f2f85b..deec1d688a7de61f74736879f3f719d0063934b2 100644
--- a/bridges/relays/utils/src/lib.rs
+++ b/bridges/relays/utils/src/lib.rs
@@ -168,12 +168,12 @@ pub fn format_ids<Id: std::fmt::Debug>(mut ids: impl ExactSizeIterator<Item = Id
 			let id0 = ids.next().expect(NTH_PROOF);
 			let id1 = ids.next().expect(NTH_PROOF);
 			format!("[{:?}, {:?}]", id0, id1)
-		}
+		},
 		len => {
 			let id0 = ids.next().expect(NTH_PROOF);
 			let id_last = ids.last().expect(NTH_PROOF);
 			format!("{}:[{:?} ... {:?}]", len, id0, id_last)
-		}
+		},
 	}
 }
 
@@ -220,7 +220,10 @@ impl ProcessFutureResult {
 	/// Returns Ok(true) if future has succeeded.
 	/// Returns Ok(false) if future has failed with non-connection error.
 	/// Returns Err if future is `ConnectionFailed`.
-	pub fn fail_if_connection_error(self, failed_client: FailedClient) -> Result<bool, FailedClient> {
+	pub fn fail_if_connection_error(
+		self,
+		failed_client: FailedClient,
+	) -> Result<bool, FailedClient> {
 		match self {
 			ProcessFutureResult::Success => Ok(true),
 			ProcessFutureResult::Failed => Ok(false),
@@ -247,7 +250,7 @@ where
 			on_success(result);
 			retry_backoff.reset();
 			ProcessFutureResult::Success
-		}
+		},
 		Err(error) if error.is_connection_error() => {
 			log::error!(
 				target: "bridge",
@@ -259,7 +262,7 @@ where
 			retry_backoff.reset();
 			go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse());
 			ProcessFutureResult::ConnectionFailed
-		}
+		},
 		Err(error) => {
 			let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY);
 			log::error!(
@@ -272,6 +275,6 @@ where
 
 			go_offline_future.set(go_offline(retry_delay).fuse());
 			ProcessFutureResult::Failed
-		}
+		},
 	}
 }
diff --git a/bridges/relays/utils/src/metrics.rs b/bridges/relays/utils/src/metrics.rs
index 4855dba8ea246f14f5688725195c3b890d627d9a..5c796071c6d5b2e2cef7dca5edfece4fdd7ca9fa 100644
--- a/bridges/relays/utils/src/metrics.rs
+++ b/bridges/relays/utils/src/metrics.rs
@@ -82,21 +82,14 @@ pub trait StandaloneMetrics: Metrics {
 
 impl Default for MetricsAddress {
 	fn default() -> Self {
-		MetricsAddress {
-			host: "127.0.0.1".into(),
-			port: 9616,
-		}
+		MetricsAddress { host: "127.0.0.1".into(), port: 9616 }
 	}
 }
 
 impl MetricsParams {
 	/// Creates metrics params so that metrics are not exposed.
 	pub fn disabled() -> Self {
-		MetricsParams {
-			address: None,
-			registry: None,
-			metrics_prefix: None,
-		}
+		MetricsParams { address: None, registry: None, metrics_prefix: None }
 	}
 
 	/// Do not expose metrics.
@@ -114,11 +107,7 @@ impl MetricsParams {
 
 impl From<Option<MetricsAddress>> for MetricsParams {
 	fn from(address: Option<MetricsAddress>) -> Self {
-		MetricsParams {
-			address,
-			registry: None,
-			metrics_prefix: None,
-		}
+		MetricsParams { address, registry: None, metrics_prefix: None }
 	}
 }
 
@@ -134,7 +123,10 @@ pub fn metric_name(prefix: Option<&str>, name: &str) -> String {
 /// Set value of gauge metric.
 ///
 /// If value is `Ok(None)` or `Err(_)`, metric would have default value.
-pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &Gauge<V>, value: Result<Option<T>, E>) {
+pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(
+	gauge: &Gauge<V>,
+	value: Result<Option<T>, E>,
+) {
 	gauge.set(match value {
 		Ok(Some(value)) => {
 			log::trace!(
@@ -144,7 +136,7 @@ pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &G
 				value,
 			);
 			value
-		}
+		},
 		Ok(None) => {
 			log::warn!(
 				target: "bridge-metrics",
@@ -152,7 +144,7 @@ pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &G
 				gauge.desc().first().map(|d| &d.fq_name),
 			);
 			Default::default()
-		}
+		},
 		Err(error) => {
 			log::warn!(
 				target: "bridge-metrics",
@@ -161,6 +153,6 @@ pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &G
 				error,
 			);
 			Default::default()
-		}
+		},
 	})
 }
diff --git a/bridges/relays/utils/src/metrics/float_json_value.rs b/bridges/relays/utils/src/metrics/float_json_value.rs
index c610ac04dc97d0e0dc177e82a9d1c3e7b0e2107b..a57cc5a77f579dc924d2c8f72cbca10694747df7 100644
--- a/bridges/relays/utils/src/metrics/float_json_value.rs
+++ b/bridges/relays/utils/src/metrics/float_json_value.rs
@@ -14,7 +14,9 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::metrics::{metric_name, register, F64SharedRef, Gauge, PrometheusError, Registry, StandaloneMetrics, F64};
+use crate::metrics::{
+	metric_name, register, F64SharedRef, Gauge, PrometheusError, Registry, StandaloneMetrics, F64,
+};
 
 use async_std::sync::{Arc, RwLock};
 use async_trait::async_trait;
@@ -100,18 +102,12 @@ impl StandaloneMetrics for FloatJsonValueMetric {
 /// Parse HTTP service response.
 fn parse_service_response(json_path: &str, response: &str) -> Result<f64, String> {
 	let json = serde_json::from_str(response).map_err(|err| {
-		format!(
-			"Failed to parse HTTP service response: {:?}. Response: {:?}",
-			err, response,
-		)
+		format!("Failed to parse HTTP service response: {:?}. Response: {:?}", err, response,)
 	})?;
 
 	let mut selector = jsonpath_lib::selector(&json);
 	let maybe_selected_value = selector(json_path).map_err(|err| {
-		format!(
-			"Failed to select value from response: {:?}. Response: {:?}",
-			err, response,
-		)
+		format!("Failed to select value from response: {:?}. Response: {:?}", err, response,)
 	})?;
 	let selected_value = maybe_selected_value
 		.first()
@@ -121,7 +117,7 @@ fn parse_service_response(json_path: &str, response: &str) -> Result<f64, String
 		return Err(format!(
 			"Failed to parse float value {:?} from response. It is assumed to be positive and normal",
 			selected_value,
-		));
+		))
 	}
 
 	Ok(selected_value)
diff --git a/bridges/relays/utils/src/metrics/global.rs b/bridges/relays/utils/src/metrics/global.rs
index d212480510448339328494b7b470b12b6fec4fad..7746690a0c72cf9caa6ca75215cb3ffa755e231f 100644
--- a/bridges/relays/utils/src/metrics/global.rs
+++ b/bridges/relays/utils/src/metrics/global.rs
@@ -17,7 +17,8 @@
 //! Global system-wide Prometheus metrics exposed by relays.
 
 use crate::metrics::{
-	metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics, F64, U64,
+	metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics,
+	F64, U64,
 };
 
 use async_std::sync::{Arc, Mutex};
@@ -50,7 +51,10 @@ impl GlobalMetrics {
 				registry,
 			)?,
 			process_cpu_usage_percentage: register(
-				Gauge::new(metric_name(prefix, "process_cpu_usage_percentage"), "Process CPU usage")?,
+				Gauge::new(
+					metric_name(prefix, "process_cpu_usage_percentage"),
+					"Process CPU usage",
+				)?,
 				registry,
 			)?,
 			process_memory_usage_bytes: register(
@@ -92,16 +96,19 @@ impl StandaloneMetrics for GlobalMetrics {
 					memory_usage,
 				);
 
-				self.process_cpu_usage_percentage
-					.set(if cpu_usage.is_finite() { cpu_usage } else { 0f64 });
+				self.process_cpu_usage_percentage.set(if cpu_usage.is_finite() {
+					cpu_usage
+				} else {
+					0f64
+				});
 				self.process_memory_usage_bytes.set(memory_usage);
-			}
+			},
 			_ => {
 				log::warn!(
 					target: "bridge-metrics",
 					"Failed to refresh process information. Metrics may show obsolete values",
 				);
-			}
+			},
 		}
 	}
 
diff --git a/bridges/relays/utils/src/relay_loop.rs b/bridges/relays/utils/src/relay_loop.rs
index ef8ebf4e8a28dfa14b194386a60b3c1a596984b5..49047a8810d6f3ec440a25d424615af202d3ed28 100644
--- a/bridges/relays/utils/src/relay_loop.rs
+++ b/bridges/relays/utils/src/relay_loop.rs
@@ -14,8 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics};
-use crate::{FailedClient, MaybeConnectionError};
+use crate::{
+	metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics},
+	FailedClient, MaybeConnectionError,
+};
 
 use async_trait::async_trait;
 use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration};
@@ -45,15 +47,11 @@ impl Client for () {
 
 /// Returns generic loop that may be customized and started.
 pub fn relay_loop<SC, TC>(source_client: SC, target_client: TC) -> Loop<SC, TC, ()> {
-	Loop {
-		reconnect_delay: RECONNECT_DELAY,
-		source_client,
-		target_client,
-		loop_metric: None,
-	}
+	Loop { reconnect_delay: RECONNECT_DELAY, source_client, target_client, loop_metric: None }
 }
 
-/// Returns generic relay loop metrics that may be customized and used in one or several relay loops.
+/// Returns generic relay loop metrics that may be customized and used in one or several relay
+/// loops.
 pub fn relay_metrics(prefix: Option<String>, params: MetricsParams) -> LoopMetrics<(), (), ()> {
 	LoopMetrics {
 		relay_loop: Loop {
@@ -94,7 +92,11 @@ impl<SC, TC, LM> Loop<SC, TC, LM> {
 	}
 
 	/// Start building loop metrics using given prefix.
-	pub fn with_metrics(self, prefix: Option<String>, params: MetricsParams) -> LoopMetrics<SC, TC, ()> {
+	pub fn with_metrics(
+		self,
+		prefix: Option<String>,
+		params: MetricsParams,
+	) -> LoopMetrics<SC, TC, ()> {
 		LoopMetrics {
 			relay_loop: Loop {
 				reconnect_delay: self.reconnect_delay,
@@ -111,8 +113,8 @@ impl<SC, TC, LM> Loop<SC, TC, LM> {
 
 	/// Run relay loop.
 	///
-	/// This function represents an outer loop, which in turn calls provided `run_loop` function to do
-	/// actual job. When `run_loop` returns, this outer loop reconnects to failed client (source,
+	/// This function represents an outer loop, which in turn calls provided `run_loop` function to
+	/// do actual job. When `run_loop` returns, this outer loop reconnects to failed client (source,
 	/// target or both) and calls `run_loop` again.
 	pub async fn run<R, F>(mut self, loop_name: String, run_loop: R) -> anyhow::Result<()>
 	where
@@ -127,20 +129,20 @@ impl<SC, TC, LM> Loop<SC, TC, LM> {
 
 			loop {
 				let loop_metric = self.loop_metric.clone();
-				let future_result = run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric);
+				let future_result =
+					run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric);
 				let result = future_result.await;
 
 				match result {
 					Ok(()) => break,
-					Err(failed_client) => {
+					Err(failed_client) =>
 						reconnect_failed_client(
 							failed_client,
 							self.reconnect_delay,
 							&mut self.source_client,
 							&mut self.target_client,
 						)
-						.await
-					}
+						.await,
 				}
 
 				log::debug!(target: "bridge", "Restarting relay loop");
@@ -177,8 +179,8 @@ impl<SC, TC, LM> LoopMetrics<SC, TC, LM> {
 		self,
 		create_metric: impl FnOnce(&Registry, Option<&str>) -> Result<M, PrometheusError>,
 	) -> anyhow::Result<Self> {
-		// since standalone metrics are updating themselves, we may just ignore the fact that the same
-		// standalone metric is exposed by several loops && only spawn single metric
+		// since standalone metrics are updating themselves, we may just ignore the fact that the
+		// same standalone metric is exposed by several loops && only spawn single metric
 		match create_metric(&self.registry, self.metrics_prefix.as_deref()) {
 			Ok(standalone_metrics) => standalone_metrics.spawn(),
 			Err(PrometheusError::AlreadyReg) => (),
@@ -252,8 +254,8 @@ pub async fn reconnect_failed_client(
 						reconnect_delay.as_secs(),
 						error,
 					);
-					continue;
-				}
+					continue
+				},
 			}
 		}
 		if failed_client == FailedClient::Both || failed_client == FailedClient::Target {
@@ -266,12 +268,12 @@ pub async fn reconnect_failed_client(
 						reconnect_delay.as_secs(),
 						error,
 					);
-					continue;
-				}
+					continue
+				},
 			}
 		}
 
-		break;
+		break
 	}
 }
 
@@ -280,8 +282,9 @@ fn create_metrics_registry(prefix: Option<String>) -> Registry {
 	match prefix {
 		Some(prefix) => {
 			assert!(!prefix.is_empty(), "Metrics prefix can not be empty");
-			Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed")
-		}
+			Registry::new_custom(Some(prefix), None)
+				.expect("only fails if prefix is empty; prefix is not empty; qed")
+		},
 		None => Registry::new(),
 	}
 }