diff --git a/polkadot/bridges/.config/lingua.dic b/polkadot/bridges/.config/lingua.dic
index 1fc67a5a5d249488b36cb04ba514f39b10fe3b29..f4b4d55d6e88224c83193253245fb65fe761793b 100644
--- a/polkadot/bridges/.config/lingua.dic
+++ b/polkadot/bridges/.config/lingua.dic
@@ -1,33 +1,111 @@
 90
-annualised/MS
-Apache-2.0/M
-AccountId/MS
-api/SM
-auth
-auths/SM
+
+&&
+1KB
+1MB
+5MB
+=
 API/SM
 APIs
-arg
-args
-aren
-async
+AccountId/MS
+Apache-2.0/M
+Autogenerated
+BFT/M
+BTC/S
 Best/MS
-benchmarking/MS
 BlockId
-BFT/M
-bitfield/MS
-blake2/MS
-blockchain/MS
-boolean
-borked
-BridgeStorage
 BlockNumber
-BTC/S
+BridgeStorage
 CLI/MS
 Chain1
 Chain2
 ChainSpec
 ChainTime
+DOT/S
+ERC-20
+Ethereum
+FN
+FinalizationError
+GPL/M
+GPLv3/M
+GiB/S
+Handler/MS
+Hasher
+HeaderA
+HeaderId
+InitiateChange
+Instance1
+Instance2
+Instance42
+InstantCurrencyPayments
+KSM/S
+KYC/M
+keypair/MS
+KeyPair
+Kovan
+Lane1
+Lane2
+Lane3
+LaneId
+MIN_SIZE
+MIT/M
+MMR
+MaxUnrewardedRelayerEntriesAtInboundLane
+MaybeExtra
+MaybeOrphan
+Merklized
+MessageNonce
+MessageNonces
+MessagePayload
+MetricsParams
+Millau/MS
+OldHeader
+OutboundMessages
+PoA
+PoV/MS
+Pre
+RLP
+RPC/MS
+Rialto/MS
+Relayer/MS
+Runtime1
+Runtime2
+SIZE_FACTOR
+SS58
+SS58Prefix
+STALL_SYNC_TIMEOUT
+SURI
+ServiceFactory/MS
+SignedExtension
+Stringified
+Submitter1
+S|N
+TCP
+ThisChain
+TODO
+U256
+Unparsed
+Vec
+WND/S
+Westend/MS
+Wococo/MS
+XCM/S
+XCMP/M
+annualised/MS
+api/SM
+aren
+arg
+args
+async
+auth
+auths/SM
+backoff
+benchmarking/MS
+best_substrate_header
+bitfield/MS
+blake2/MS
+blockchain/MS
+borked
 chain_getBlock
 choosen
 config/MS
@@ -36,146 +114,128 @@ crypto/MS
 customizable/B
 Debian/M
 decodable/MS
-DOT/S
-doesn
+delivery_and_dispatch_fee
+dev
+dispatchable
 dispatchables
+doesn
 ed25519
 enum/MS
-ERC-20
+entrypoint/MS
 ethereum/MS
 externality/MS
 extrinsic/MS
 extrinsics
 fedora/M
-FN
-FinalizationError
-GiB/S
-GPL/M
-GPLv3/M
-Handler/MS
-HeaderA
-HeaderId
+functor
+fuzzer
+hasher
+hardcoded
 https
 implementers
+include/BG
 inherent/MS
 initialize/RG
 instantiate/B
 intrinsic/MS
-intrinsics
-InitiateChange
-isn
+invariant/MS
+invariants
 io
+isn
+isolate/BG
 js
+jsonrpsee
+keccak
 keccak256/M
-keypair/MS
-KSM/S
-Lane1
-Lane2
-Lane3
-LaneId
+keyring
+keystore/MS
 kusama/S
-KYC/M
-keccak
-Kovan
+lane
+malus
+max_value
 merkle/MS
-MessageNonce
-MessageNonces
-Merklized
-MaybeOrphan
-MaybeExtra
-MetricsParams
-MessagePayload
+metadata
+millau
 misbehavior/SM
 misbehaviors
-MIN_SIZE
-MIT/M
-max_value
 multivalidator/SM
 natively
-OldHeader
+no_std
 nonces
 number
-no_std
 ok
 oneshot/MS
 others'
-OutboundMessages
+pallet_bridge_grandpa
+pallet_bridge_messages
+pallet_message_lane
 parablock/MS
 parachain/MS
+param/MS
 parameterize/D
-pallet_message_lane
 plancks
 polkadot/MS
 pov-block/MS
-PoA
-PoV/MS
 precommit
 prometheus
 proxying
-prune_end
-prune_depth
 provisioner/MS
+probabilistically
+prune_depth
+prune_end
+receival
+reconnection
 redhat/M
 repo/MS
-receival
-RPC/MS
-RLP
 runtime/MS
-Runtime1
-Runtime2
 rustc/MS
-ServiceFactory/MS
-SignedExtension
-SIZE_FACTOR
+relayer/MS
+shouldn
+source_at_target
+source_latest_confirmed
+source_latest_generated
+sp_finality_grandpa
+spawner
 sr25519
-SS58
-SS58Prefix
 src
-S|N
-SURI
-source
+stringified
 struct/MS
-Submitter1
 submitters/MS
 subsystem/MS
 subsystems'
-shouldn
+subcommand/MS
 synchronizer
+target_at_source
+target_latest_confirmed
+target_latest_received
 taskmanager/MS
 teleport/RG
 teleportation/SM
 teleporter/SM
 teleporters
 testnet/MS
+timeframe
+tokio
+timestamp
 trie/MS
 trustless/Y
-ThisChain
-TCP
+tuple
+u32
 ubuntu/M
-union/MSG
 undeliverable
 unfinalized
+union/MSG
 unpruned
 unservable/B
 unsynced
+updatable
+validator/SM
 ve
 vec
-Vec
-validator/SM
 verifier
 w3f/MS
+wakeup
 wasm/M
-WND/S
-XCM/S
-XCMP/M
-include/BG
-isolate/BG
-Instance1
-Instance2
-Instance42
-Pre
-Rialto
-stringified
-Stringified
-millau
-Millau
+websocket
+x2
+~
diff --git a/polkadot/bridges/.editorconfig b/polkadot/bridges/.editorconfig
index d67ffe8f90f4fadd4771c4423438d6e3c08b253c..e2375881ea0616906a877183bd8cfc625ed42357 100644
--- a/polkadot/bridges/.editorconfig
+++ b/polkadot/bridges/.editorconfig
@@ -14,3 +14,6 @@ indent_style=space
 indent_size=2
 tab_width=8
 end_of_line=lf
+
+[*.md]
+max_line_length=80
diff --git a/polkadot/bridges/.gitignore b/polkadot/bridges/.gitignore
index 0ab0857843256bb07e0f97192f74e6e6bada6fd3..5d10cfa41a4487247e2c331144d3dabf0ec5e6f7 100644
--- a/polkadot/bridges/.gitignore
+++ b/polkadot/bridges/.gitignore
@@ -18,6 +18,7 @@ hfuzz_workspace
 
 .DS_Store
 
+.cargo
 .idea
 .vscode
 *.iml
diff --git a/polkadot/bridges/.gitlab-ci.yml b/polkadot/bridges/.gitlab-ci.yml
index b49df92c73c897c55e7a5e0269f04b187fe2e3fc..839519a6952609ec5d0568ef012644376a0b1267 100644
--- a/polkadot/bridges/.gitlab-ci.yml
+++ b/polkadot/bridges/.gitlab-ci.yml
@@ -15,7 +15,7 @@ variables:                         &default-vars
   GIT_DEPTH:                       100
   CARGO_INCREMENTAL:               0
   ARCH:                            "x86_64"
-  CI_IMAGE:                        "paritytech/bridges-ci:production"
+  CI_IMAGE:                        "paritytech/bridges-ci:staging"
   RUST_BACKTRACE:                  full
 
 default:
@@ -76,6 +76,7 @@ default:
     - if: $CI_PIPELINE_SOURCE == "pipeline"
       when: never
     - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/              # i.e. v1.0, v2.1rc1
+    - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]{4}-[0-9]{2}-[0-9]{2}.*$/  # i.e. v2021-09-27, v2021-09-27-1
     # there are two types of nightly pipelines:
     # 1. this one is triggered by the schedule with $PIPELINE == "nightly", it's for releasing.
     # this job runs only on nightly pipeline with the mentioned variable, against `master` branch
@@ -93,26 +94,22 @@ clippy-nightly:
   stage:                           lint
   <<:                              *docker-env
   <<:                              *test-refs
-  variables:
-    RUSTFLAGS:                     "-D warnings"
   script:
-    - cargo +nightly clippy --all-targets
-  # FIXME: remove when all the warns are fixed
-  allow_failure:                   true
+    - SKIP_WASM_BUILD=1 cargo +nightly clippy --all-targets -- -A clippy::redundant_closure
 
 fmt:
   stage:                           lint
   <<:                              *docker-env
   <<:                              *test-refs
   script:
-    - cargo fmt --all -- --check
+    - cargo +nightly fmt --all -- --check
 
 spellcheck:
   stage:                           lint
   <<:                              *docker-env
   <<:                              *test-refs
   script:
-    - cargo spellcheck check -m 1 -vv $(find modules/currency-exchange/src -name "*.rs")
+    - cargo spellcheck check -vvvv --cfg=.config/spellcheck.toml --checkers hunspell -m 1
 
 #### stage:                        check
 
@@ -121,11 +118,11 @@ check:
   <<:                              *docker-env
   <<:                              *test-refs
   script:                          &check-script
-    - time cargo check --verbose --workspace
+    - SKIP_WASM_BUILD=1 time cargo check --locked --verbose --workspace
     # Check Rialto benchmarks runtime
-    - time cargo check -p rialto-runtime --features runtime-benchmarks --verbose
+    - SKIP_WASM_BUILD=1 time cargo check -p rialto-runtime --locked --features runtime-benchmarks --verbose
     # Check Millau benchmarks runtime
-    - time cargo check -p millau-runtime --features runtime-benchmarks --verbose
+    - SKIP_WASM_BUILD=1 time cargo check -p millau-runtime --locked --features runtime-benchmarks --verbose
 
 check-nightly:
   stage:                           check
@@ -141,8 +138,13 @@ test:
   stage:                           test
   <<:                              *docker-env
   <<:                              *test-refs
+#  variables:
+#    RUSTFLAGS:                     "-D warnings"
   script:                          &test-script
-    - time cargo test --verbose --workspace
+    - time cargo fetch
+    - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output  ".packages[] | select(.name == \"polkadot-test-runtime\").manifest_path"`
+    - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output  ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"`
+    - CARGO_NET_OFFLINE=true time cargo test --verbose --workspace
 
 test-nightly:
   stage:                           test
@@ -189,12 +191,17 @@ build:
   <<:                              *collect-artifacts
   # master
   script:                          &build-script
-    - time cargo build --release --verbose --workspace
+    - time cargo fetch
+    - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output  ".packages[] | select(.name == \"polkadot-test-runtime\").manifest_path"`
+    - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output  ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"`
+    - CARGO_NET_OFFLINE=true time cargo build --release --verbose --workspace
   after_script:
     # Prepare artifacts
     - mkdir -p ./artifacts
     - strip ./target/release/rialto-bridge-node
     - mv -v ./target/release/rialto-bridge-node ./artifacts/
+    - strip ./target/release/rialto-parachain-collator
+    - mv -v ./target/release/rialto-parachain-collator ./artifacts/
     - strip ./target/release/millau-bridge-node
     - mv -v ./target/release/millau-bridge-node ./artifacts/
     - strip ./target/release/ethereum-poa-relay
@@ -223,6 +230,9 @@ build-nightly:
     GIT_STRATEGY:                  none
     DOCKERFILE:                    ci.Dockerfile
     IMAGE_NAME:                    docker.io/paritytech/$CI_JOB_NAME
+    VAULT_SERVER_URL:              "https://vault.parity-mgmt-vault.parity.io"
+    VAULT_AUTH_PATH:               "gitlab-parity-io-jwt"
+    VAULT_AUTH_ROLE:               "cicd_gitlab_parity_${CI_PROJECT_NAME}"
   needs:
     - job:                         build
       artifacts:                   true
@@ -233,8 +243,15 @@ build-nightly:
         VERSION=$(echo ${CI_COMMIT_REF_NAME} | sed -r 's#/+#-#g');
       fi
     - echo "Effective tags = ${VERSION} sha-${CI_COMMIT_SHORT_SHA} latest"
+  secrets:
+      DOCKER_HUB_USER:
+        vault:                     cicd/gitlab/parity/DOCKER_HUB_USER@kv
+        file:                      false
+      DOCKER_HUB_PASS:
+        vault:                     cicd/gitlab/parity/DOCKER_HUB_PASS@kv
+        file:                      false
   script:
-    - test "${Docker_Hub_User_Parity}" -a "${Docker_Hub_Pass_Parity}" ||
+    - test "${DOCKER_HUB_USER}" -a "${DOCKER_HUB_PASS}" ||
         ( echo "no docker credentials provided"; exit 1 )
     - cd ./artifacts
     - buildah bud
@@ -248,19 +265,23 @@ build-nightly:
         --tag "${IMAGE_NAME}:latest"
         --file "${DOCKERFILE}" .
     # The job will success only on the protected branch
-    - echo "$Docker_Hub_Pass_Parity" |
-        buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io
+    - echo "${DOCKER_HUB_PASS}" |
+        buildah login --username "${DOCKER_HUB_USER}" --password-stdin docker.io
     - buildah info
     - buildah push --format=v2s2 "${IMAGE_NAME}:${VERSION}"
     - buildah push --format=v2s2 "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}"
     - buildah push --format=v2s2 "${IMAGE_NAME}:latest"
   after_script:
-    - env REGISTRY_AUTH_FILE= buildah logout "$IMAGE_NAME"
+    - env REGISTRY_AUTH_FILE= buildah logout --all
 
 rialto-bridge-node:
   stage:                           publish
   <<:                              *build-push-image
 
+rialto-parachain-collator:
+  stage:                           publish
+  <<:                              *build-push-image
+
 millau-bridge-node:
   stage:                           publish
   <<:                              *build-push-image
diff --git a/polkadot/bridges/.maintain/millau-weight-template.hbs b/polkadot/bridges/.maintain/millau-weight-template.hbs
new file mode 100644
index 0000000000000000000000000000000000000000..7a2a67627bb20c456be413f7b86487cecfd8168b
--- /dev/null
+++ b/polkadot/bridges/.maintain/millau-weight-template.hbs
@@ -0,0 +1,103 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `{{pallet}}`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
+//! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}
+//! LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}}
+//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}}
+//! CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}}
+
+// Executed Command:
+{{#each args as |arg|~}}
+// {{arg}}
+{{/each}}
+
+#![allow(clippy::all)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for `{{pallet}}`.
+pub trait WeightInfo {
+	{{~#each benchmarks as |benchmark|}}
+	fn {{benchmark.name~}}
+	(
+		{{~#each benchmark.components as |c| ~}}
+		{{c.name}}: u32, {{/each~}}
+	) -> Weight;
+	{{~/each}}
+}
+
+/// Weights for `{{pallet}}` using the Millau node and recommended hardware.
+pub struct MillauWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for MillauWeight<T> {
+	{{~#each benchmarks as |benchmark|}}
+	fn {{benchmark.name~}}
+	(
+		{{~#each benchmark.components as |c| ~}}
+		{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
+	) -> Weight {
+		({{underscore benchmark.base_weight}} as Weight)
+			{{~#each benchmark.component_weight as |cw|}}
+			.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
+			{{~/each}}
+			{{~#if (ne benchmark.base_reads "0")}}
+			.saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight))
+			{{~/if}}
+			{{~#each benchmark.component_reads as |cr|}}
+			.saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight)))
+			{{~/each}}
+			{{~#if (ne benchmark.base_writes "0")}}
+			.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight))
+			{{~/if}}
+			{{~#each benchmark.component_writes as |cw|}}
+			.saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)))
+			{{~/each}}
+	}
+	{{~/each}}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	{{~#each benchmarks as |benchmark|}}
+	fn {{benchmark.name~}}
+	(
+		{{~#each benchmark.components as |c| ~}}
+		{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
+	) -> Weight {
+		({{underscore benchmark.base_weight}} as Weight)
+			{{~#each benchmark.component_weight as |cw|}}
+			.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
+			{{~/each}}
+			{{~#if (ne benchmark.base_reads "0")}}
+			.saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight))
+			{{~/if}}
+			{{~#each benchmark.component_reads as |cr|}}
+			.saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight)))
+			{{~/each}}
+			{{~#if (ne benchmark.base_writes "0")}}
+			.saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight))
+			{{~/if}}
+			{{~#each benchmark.component_writes as |cw|}}
+			.saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)))
+			{{~/each}}
+	}
+	{{~/each}}
+}
diff --git a/polkadot/bridges/.maintain/rialto-weight-template.hbs b/polkadot/bridges/.maintain/rialto-weight-template.hbs
index 4868e6c84bb2810f028ef94a8944b0da219363cc..cb1b58d23b26420e9f6d89c9b7fc6ce3b7fa9141 100644
--- a/polkadot/bridges/.maintain/rialto-weight-template.hbs
+++ b/polkadot/bridges/.maintain/rialto-weight-template.hbs
@@ -8,13 +8,14 @@
 
 // Parity Bridges Common is distributed in the hope that it will be useful,
 // but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 // GNU General Public License for more details.
 
 // You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+// along with Parity Bridges Common. If not, see
+// <http://www.gnu.org/licenses/>.
 
-//! Autogenerated weights for {{cmd.pallet}}
+//! Autogenerated weights for `{{pallet}}`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}}
 //! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}}
@@ -34,70 +35,74 @@
 use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}};
 use sp_std::marker::PhantomData;
 
-/// Weight functions needed for {{pallet}}.
+/// Weight functions needed for `{{pallet}}`.
 pub trait WeightInfo {
-	{{~#each benchmarks as |benchmark|}}
-	fn {{benchmark.name~}}
-	(
-		{{~#each benchmark.components as |c| ~}}
-		{{c.name}}: u32, {{/each~}}
-	) -> Weight;
-	{{~/each}}
+{{~#each benchmarks as |benchmark|}}
+fn {{benchmark.name~}}
+(
+{{~#each benchmark.components as |c| ~}}
+{{c.name}}: u32, {{/each~}}
+) -> Weight;
+{{~/each}}
 }
 
-/// Weights for {{pallet}} using the Rialto node and recommended hardware.
+/// Weights for `{{pallet}}` using the Rialto node and recommended hardware.
 pub struct RialtoWeight<T>(PhantomData<T>);
-impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
-	{{~#each benchmarks as |benchmark|}}
-	fn {{benchmark.name~}}
-	(
-		{{~#each benchmark.components as |c| ~}}
-		{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
-	) -> Weight {
-		({{underscore benchmark.base_weight}} as Weight)
-			{{~#each benchmark.component_weight as |cw|}}
-			.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
-			{{~/each}}
-			{{~#if (ne benchmark.base_reads "0")}}
-			.saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight))
-			{{~/if}}
-			{{~#each benchmark.component_reads as |cr|}}
-			.saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight)))
-			{{~/each}}
-			{{~#if (ne benchmark.base_writes "0")}}
-			.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight))
-			{{~/if}}
-			{{~#each benchmark.component_writes as |cw|}}
-			.saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)))
-			{{~/each}}
-	}
-	{{~/each}}
-}
+		impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
+				{{~#each benchmarks as |benchmark|}}
+				fn {{benchmark.name~}}
+				(
+				{{~#each benchmark.components as |c| ~}}
+				{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
+				) -> Weight {
+				({{underscore benchmark.base_weight}} as Weight)
+				{{~#each benchmark.component_weight as |cw|}}
+				.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
+				{{~/each}}
+				{{~#if (ne benchmark.base_reads "0")}}
+				.saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight))
+				{{~/if}}
+				{{~#each benchmark.component_reads as |cr|}}
+				.saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as
+				Weight)))
+				{{~/each}}
+				{{~#if (ne benchmark.base_writes "0")}}
+				.saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight))
+				{{~/if}}
+				{{~#each benchmark.component_writes as |cw|}}
+				.saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as
+				Weight)))
+				{{~/each}}
+				}
+				{{~/each}}
+				}
 
-// For backwards compatibility and tests
-impl WeightInfo for () {
-	{{~#each benchmarks as |benchmark|}}
-	fn {{benchmark.name~}}
-	(
-		{{~#each benchmark.components as |c| ~}}
-		{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
-	) -> Weight {
-		({{underscore benchmark.base_weight}} as Weight)
-			{{~#each benchmark.component_weight as |cw|}}
-			.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
-			{{~/each}}
-			{{~#if (ne benchmark.base_reads "0")}}
-			.saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight))
-			{{~/if}}
-			{{~#each benchmark.component_reads as |cr|}}
-			.saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight)))
-			{{~/each}}
-			{{~#if (ne benchmark.base_writes "0")}}
-			.saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight))
-			{{~/if}}
-			{{~#each benchmark.component_writes as |cw|}}
-			.saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)))
-			{{~/each}}
-	}
-	{{~/each}}
-}
+				// For backwards compatibility and tests
+				impl WeightInfo for () {
+				{{~#each benchmarks as |benchmark|}}
+				fn {{benchmark.name~}}
+				(
+				{{~#each benchmark.components as |c| ~}}
+				{{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}}
+				) -> Weight {
+				({{underscore benchmark.base_weight}} as Weight)
+				{{~#each benchmark.component_weight as |cw|}}
+				.saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))
+				{{~/each}}
+				{{~#if (ne benchmark.base_reads "0")}}
+				.saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight))
+				{{~/if}}
+				{{~#each benchmark.component_reads as |cr|}}
+				.saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as
+				Weight)))
+				{{~/each}}
+				{{~#if (ne benchmark.base_writes "0")}}
+				.saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight))
+				{{~/if}}
+				{{~#each benchmark.component_writes as |cw|}}
+				.saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as
+				Weight)))
+				{{~/each}}
+				}
+				{{~/each}}
+				}
\ No newline at end of file
diff --git a/polkadot/bridges/Cargo.lock b/polkadot/bridges/Cargo.lock
index 86b075028125f626e1c70ca594554bd0fcfbd9c5..9d996238141cb91f1a2791044186953d0cd08b5e 100644
--- a/polkadot/bridges/Cargo.lock
+++ b/polkadot/bridges/Cargo.lock
@@ -14,20 +14,11 @@ dependencies = [
 
 [[package]]
 name = "addr2line"
-version = "0.14.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7"
-dependencies = [
- "gimli 0.23.0",
-]
-
-[[package]]
-name = "addr2line"
-version = "0.15.2"
+version = "0.16.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a"
+checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
 dependencies = [
- "gimli 0.24.0",
+ "gimli",
 ]
 
 [[package]]
@@ -38,72 +29,64 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
 
 [[package]]
 name = "aead"
-version = "0.3.2"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331"
+checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877"
 dependencies = [
  "generic-array 0.14.4",
 ]
 
 [[package]]
 name = "aes"
-version = "0.5.0"
+version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd2bc6d3f370b5666245ff421e231cba4353df936e26986d2918e61a8fd6aef6"
+checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8"
 dependencies = [
- "aes-soft",
- "aesni",
- "block-cipher",
+ "cfg-if 1.0.0",
+ "cipher",
+ "cpufeatures 0.2.1",
+ "opaque-debug 0.3.0",
 ]
 
 [[package]]
 name = "aes-gcm"
-version = "0.7.0"
+version = "0.9.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0301c9e9c443494d970a07885e8cf3e587bae8356a1d5abd0999068413f7205f"
+checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6"
 dependencies = [
  "aead",
  "aes",
- "block-cipher",
+ "cipher",
+ "ctr",
  "ghash",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
-name = "aes-soft"
-version = "0.5.0"
+name = "ahash"
+version = "0.7.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63dd91889c49327ad7ef3b500fd1109dbd3c509a03db0d4a9ce413b79f575cb6"
+checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98"
 dependencies = [
- "block-cipher",
- "byteorder",
- "opaque-debug 0.3.0",
+ "getrandom 0.2.3",
+ "once_cell",
+ "version_check",
 ]
 
 [[package]]
-name = "aesni"
-version = "0.8.0"
+name = "aho-corasick"
+version = "0.7.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a6fe808308bb07d393e2ea47780043ec47683fcf19cf5efc8ca51c50cc8c68a"
+checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
 dependencies = [
- "block-cipher",
- "opaque-debug 0.3.0",
+ "memchr",
 ]
 
 [[package]]
-name = "ahash"
-version = "0.4.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e"
-
-[[package]]
-name = "aho-corasick"
-version = "0.7.15"
+name = "always-assert"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
-dependencies = [
- "memchr",
-]
+checksum = "fbf688625d06217d5b1bb0ea9d9c44a1635fd0ee3534466388d18203174f4d11"
 
 [[package]]
 name = "ansi_term"
@@ -125,24 +108,24 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.38"
+version = "1.0.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1"
+checksum = "61604a8f862e1d5c3229fdd78f8b02c68dcf73a4c4b05fd636d12240aaa242c1"
 
 [[package]]
 name = "approx"
-version = "0.3.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3"
+checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e"
 dependencies = [
  "num-traits",
 ]
 
 [[package]]
 name = "arbitrary"
-version = "1.0.0"
+version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "698b65a961a9d730fb45b6b0327e20207810c9f61ee421b082b27ba003f49e2b"
+checksum = "577b08a4acd7b99869f863c50011b01eb73424ccc798ecd996f2e24817adfca7"
 
 [[package]]
 name = "array_tool"
@@ -173,9 +156,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
 
 [[package]]
 name = "arrayvec"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7"
+checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd"
 
 [[package]]
 name = "asn1_der"
@@ -212,16 +195,16 @@ dependencies = [
 
 [[package]]
 name = "async-executor"
-version = "1.4.0"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146"
+checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965"
 dependencies = [
  "async-task",
  "concurrent-queue",
  "fastrand",
  "futures-lite",
  "once_cell",
- "vec-arena",
+ "slab",
 ]
 
 [[package]]
@@ -242,29 +225,28 @@ dependencies = [
 
 [[package]]
 name = "async-io"
-version = "1.3.1"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd"
+checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b"
 dependencies = [
  "concurrent-queue",
- "fastrand",
  "futures-lite",
  "libc",
  "log",
- "nb-connect",
  "once_cell",
  "parking",
  "polling",
- "vec-arena",
+ "slab",
+ "socket2 0.4.1",
  "waker-fn",
  "winapi 0.3.9",
 ]
 
 [[package]]
 name = "async-lock"
-version = "2.3.0"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb"
+checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b"
 dependencies = [
  "event-listener",
 ]
@@ -280,15 +262,16 @@ dependencies = [
 
 [[package]]
 name = "async-process"
-version = "1.0.2"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef37b86e2fa961bae5a4d212708ea0154f904ce31d1a4a7f47e1bbc33a0c040b"
+checksum = "b21b63ab5a0db0369deb913540af2892750e42d949faacc7a61495ac418a1692"
 dependencies = [
  "async-io",
  "blocking",
  "cfg-if 1.0.0",
  "event-listener",
  "futures-lite",
+ "libc",
  "once_cell",
  "signal-hook",
  "winapi 0.3.9",
@@ -296,9 +279,9 @@ dependencies = [
 
 [[package]]
 name = "async-std"
-version = "1.9.0"
+version = "1.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341"
+checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952"
 dependencies = [
  "async-attributes",
  "async-channel",
@@ -306,7 +289,7 @@ dependencies = [
  "async-io",
  "async-lock",
  "async-process",
- "crossbeam-utils 0.8.3",
+ "crossbeam-utils",
  "futures-channel",
  "futures-core",
  "futures-io",
@@ -317,7 +300,7 @@ dependencies = [
  "memchr",
  "num_cpus",
  "once_cell",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
  "pin-utils",
  "slab",
  "wasm-bindgen-futures",
@@ -325,9 +308,9 @@ dependencies = [
 
 [[package]]
 name = "async-std-resolver"
-version = "0.20.1"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f665c56111e244fe38e7708ee10948a4356ad6a548997c21f5a63a0f4e0edc4d"
+checksum = "ed4e2c3da14d8ad45acb1e3191db7a918e9505b6f155b218e70a7c9a1a48c638"
 dependencies = [
  "async-std",
  "async-trait",
@@ -343,24 +326,11 @@ version = "4.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0"
 
-[[package]]
-name = "async-tls"
-version = "0.11.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f23d769dbf1838d5df5156e7b1ad404f4c463d1ac2c6aeb6cd943630f8a8400"
-dependencies = [
- "futures-core",
- "futures-io",
- "rustls 0.19.0",
- "webpki 0.21.4",
- "webpki-roots",
-]
-
 [[package]]
 name = "async-trait"
-version = "0.1.50"
+version = "0.1.51"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b98e84bbb4cbcdd97da190ba0c58a1bb0de2c1fdf67d159e192ed766aeca722"
+checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -373,11 +343,11 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fb4401f0a3622dad2e0763fa79e0eb328bc70fb7dccfdd645341f00d671247d6"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "futures-sink",
  "futures-util",
  "memchr",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
 ]
 
 [[package]]
@@ -386,11 +356,11 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "futures-sink",
  "futures-util",
  "memchr",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
 ]
 
 [[package]]
@@ -437,18 +407,32 @@ dependencies = [
 
 [[package]]
 name = "backtrace"
-version = "0.3.56"
+version = "0.3.61"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc"
+checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01"
 dependencies = [
- "addr2line 0.14.1",
+ "addr2line",
+ "cc",
  "cfg-if 1.0.0",
  "libc",
  "miniz_oxide",
- "object 0.23.0",
+ "object",
  "rustc-demangle",
 ]
 
+[[package]]
+name = "bae"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ec107f431ee3d8a8e45e6dd117adab769556ef463959e77bf6a4888d5fd500cf"
+dependencies = [
+ "heck",
+ "proc-macro-error 0.4.12",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "base-x"
 version = "0.2.8"
@@ -475,67 +459,143 @@ checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
 
 [[package]]
 name = "beef"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409"
+checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "beefy-gadget"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "beefy-primitives",
+ "fnv",
+ "futures 0.3.17",
+ "log",
+ "parity-scale-codec",
+ "parking_lot 0.11.2",
+ "sc-client-api",
+ "sc-keystore",
+ "sc-network",
+ "sc-network-gossip",
+ "sc-utils",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-arithmetic",
+ "sp-blockchain",
+ "sp-core",
+ "sp-keystore",
+ "sp-runtime",
+ "substrate-prometheus-endpoint",
+ "thiserror",
+ "wasm-timer",
+]
+
+[[package]]
+name = "beefy-gadget-rpc"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "beefy-gadget",
+ "beefy-primitives",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "jsonrpc-pubsub",
+ "log",
+ "parity-scale-codec",
+ "sc-rpc",
+ "serde",
+ "sp-core",
+ "sp-runtime",
+]
+
+[[package]]
+name = "beefy-merkle-tree"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+
+[[package]]
+name = "beefy-primitives"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "parity-scale-codec",
+ "scale-info",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
 
 [[package]]
 name = "bincode"
-version = "1.3.2"
+version = "1.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d175dfa69e619905c4c3cdb7c3c203fa3bdd5d51184e3afdb2742c0280493772"
+checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
 dependencies = [
- "byteorder",
  "serde",
 ]
 
 [[package]]
 name = "bindgen"
-version = "0.54.0"
+version = "0.59.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36"
+checksum = "453c49e5950bb0eb63bb3df640e31618846c89d5b7faa54040d76e98e0134375"
 dependencies = [
  "bitflags",
  "cexpr",
- "cfg-if 0.1.10",
  "clang-sys",
- "clap",
- "env_logger 0.7.1",
  "lazy_static",
  "lazycell",
- "log",
  "peeking_take_while",
  "proc-macro2",
  "quote",
  "regex",
  "rustc-hash",
  "shlex",
- "which 3.1.1",
 ]
 
 [[package]]
 name = "bitflags"
-version = "1.2.1"
+version = "1.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "bitvec"
-version = "0.20.1"
+version = "0.19.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321"
+dependencies = [
+ "funty",
+ "radium 0.5.3",
+ "tap",
+ "wyz",
+]
+
+[[package]]
+name = "bitvec"
+version = "0.20.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5011ffc90248764d7005b0e10c7294f5aa1bd87d9dd7248f4ad475b347c294d"
+checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848"
 dependencies = [
  "funty",
- "radium",
+ "radium 0.6.2",
  "tap",
  "wyz",
 ]
 
 [[package]]
 name = "blake2"
-version = "0.9.1"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4"
+checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174"
 dependencies = [
  "crypto-mac 0.8.0",
  "digest 0.9.0",
@@ -576,9 +636,9 @@ dependencies = [
 
 [[package]]
 name = "blake3"
-version = "0.3.7"
+version = "0.3.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e9ff35b701f3914bdb8fad3368d822c766ef2858b2583198e41639b936f09d3f"
+checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3"
 dependencies = [
  "arrayref",
  "arrayvec 0.5.2",
@@ -598,7 +658,7 @@ dependencies = [
  "block-padding 0.1.5",
  "byte-tools",
  "byteorder",
- "generic-array 0.12.3",
+ "generic-array 0.12.4",
 ]
 
 [[package]]
@@ -611,15 +671,6 @@ dependencies = [
  "generic-array 0.14.4",
 ]
 
-[[package]]
-name = "block-cipher"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80"
-dependencies = [
- "generic-array 0.14.4",
-]
-
 [[package]]
 name = "block-padding"
 version = "0.1.5"
@@ -649,12 +700,22 @@ dependencies = [
  "once_cell",
 ]
 
+[[package]]
+name = "bounded-vec"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "afdd1dffefe5fc66262a524b91087c43b16e478b2e3dc49eb11b0e2fd6b6ec90"
+dependencies = [
+ "thiserror",
+]
+
 [[package]]
 name = "bp-currency-exchange"
 version = "0.1.0"
 dependencies = [
  "frame-support",
  "parity-scale-codec",
+ "scale-info",
  "sp-api",
  "sp-std",
 ]
@@ -669,12 +730,13 @@ dependencies = [
  "hex-literal 0.2.1",
  "impl-rlp",
  "impl-serde",
- "libsecp256k1",
+ "libsecp256k1 0.7.0",
  "parity-bytes",
  "parity-scale-codec",
  "plain_hasher",
  "primitive-types",
  "rlp",
+ "scale-info",
  "serde",
  "serde-big-array",
  "sp-api",
@@ -693,6 +755,7 @@ dependencies = [
  "finality-grandpa",
  "frame-support",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-core",
  "sp-finality-grandpa",
@@ -707,8 +770,11 @@ dependencies = [
  "bp-messages",
  "bp-polkadot-core",
  "bp-runtime",
+ "frame-support",
+ "smallvec",
  "sp-api",
  "sp-std",
+ "sp-version",
 ]
 
 [[package]]
@@ -718,6 +784,7 @@ dependencies = [
  "bp-runtime",
  "frame-support",
  "parity-scale-codec",
+ "scale-info",
  "sp-std",
 ]
 
@@ -725,12 +792,13 @@ dependencies = [
 name = "bp-messages"
 version = "0.1.0"
 dependencies = [
- "bitvec",
+ "bitvec 0.20.4",
  "bp-runtime",
  "frame-support",
  "frame-system",
  "impl-trait-for-tuples",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-std",
 ]
@@ -747,8 +815,8 @@ dependencies = [
  "hash256-std-hasher",
  "impl-codec",
  "impl-serde",
- "max-encoded-len",
  "parity-util-mem",
+ "scale-info",
  "serde",
  "sp-api",
  "sp-core",
@@ -765,8 +833,11 @@ dependencies = [
  "bp-messages",
  "bp-polkadot-core",
  "bp-runtime",
+ "frame-support",
+ "smallvec",
  "sp-api",
  "sp-std",
+ "sp-version",
 ]
 
 [[package]]
@@ -779,6 +850,7 @@ dependencies = [
  "frame-system",
  "hex",
  "parity-scale-codec",
+ "scale-info",
  "sp-api",
  "sp-core",
  "sp-runtime",
@@ -800,6 +872,20 @@ dependencies = [
  "sp-std",
 ]
 
+[[package]]
+name = "bp-rialto-parachain"
+version = "0.1.0"
+dependencies = [
+ "bp-messages",
+ "bp-runtime",
+ "frame-support",
+ "frame-system",
+ "sp-api",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "bp-rococo"
 version = "0.1.0"
@@ -809,7 +895,7 @@ dependencies = [
  "bp-runtime",
  "frame-support",
  "parity-scale-codec",
- "smallvec 1.6.1",
+ "smallvec",
  "sp-api",
  "sp-runtime",
  "sp-std",
@@ -824,6 +910,7 @@ dependencies = [
  "hash-db",
  "num-traits",
  "parity-scale-codec",
+ "scale-info",
  "sp-core",
  "sp-io",
  "sp-runtime",
@@ -846,6 +933,17 @@ dependencies = [
  "sp-std",
 ]
 
+[[package]]
+name = "bp-token-swap"
+version = "0.1.0"
+dependencies = [
+ "frame-support",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-std",
+]
+
 [[package]]
 name = "bp-westend"
 version = "0.1.0"
@@ -854,7 +952,10 @@ dependencies = [
  "bp-messages",
  "bp-polkadot-core",
  "bp-runtime",
+ "frame-support",
  "parity-scale-codec",
+ "scale-info",
+ "smallvec",
  "sp-api",
  "sp-runtime",
  "sp-std",
@@ -890,6 +991,7 @@ dependencies = [
  "pallet-bridge-messages",
  "pallet-transaction-payment",
  "parity-scale-codec",
+ "scale-info",
  "sp-core",
  "sp-runtime",
  "sp-state-machine",
@@ -905,9 +1007,9 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3"
 
 [[package]]
 name = "bstr"
-version = "0.2.15"
+version = "0.2.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a40b47ad93e1a5404e6c18dec46b628214fee441c70f4ab5d6942142cc268a3d"
+checksum = "90682c8d613ad3373e66de8c6411e0ae2ab2571e879d2efbf73558cc66f21279"
 dependencies = [
  "memchr",
 ]
@@ -923,9 +1025,9 @@ dependencies = [
 
 [[package]]
 name = "bumpalo"
-version = "3.6.1"
+version = "3.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe"
+checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631"
 
 [[package]]
 name = "byte-slice-cast"
@@ -941,9 +1043,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
 
 [[package]]
 name = "byteorder"
-version = "1.3.4"
+version = "1.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
 
 [[package]]
 name = "bytes"
@@ -952,7 +1054,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c"
 dependencies = [
  "byteorder",
- "either",
  "iovec",
 ]
 
@@ -964,9 +1065,9 @@ checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"
 
 [[package]]
 name = "bytes"
-version = "1.0.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040"
+checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
 
 [[package]]
 name = "cache-padded"
@@ -976,18 +1077,18 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba"
 
 [[package]]
 name = "camino"
-version = "1.0.4"
+version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4648c6d00a709aa069a236adcaae4f605a6241c72bf5bee79331a4b625921a9"
+checksum = "52d74260d9bf6944e2208aa46841b4b8f0d7ffc0849a06837b2f510337f86b2b"
 dependencies = [
  "serde",
 ]
 
 [[package]]
 name = "cargo-platform"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0226944a63d1bf35a3b5f948dd7c59e263db83695c9e8bffc4037de02e30f1d7"
+checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27"
 dependencies = [
  "serde",
 ]
@@ -1008,18 +1109,18 @@ dependencies = [
 
 [[package]]
 name = "cc"
-version = "1.0.67"
+version = "1.0.70"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd"
+checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0"
 dependencies = [
  "jobserver",
 ]
 
 [[package]]
 name = "cexpr"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27"
+checksum = "db507a7679252d2276ed0dd8113c6875ec56d3089f9225b2b42c30cc1f8e5c89"
 dependencies = [
  "nom",
 ]
@@ -1036,26 +1137,34 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 
+[[package]]
+name = "cfg_aliases"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
+
 [[package]]
 name = "chacha20"
-version = "0.5.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "244fbce0d47e97e8ef2f63b81d5e05882cb518c68531eb33194990d7b7e85845"
+checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412"
 dependencies = [
- "stream-cipher",
+ "cfg-if 1.0.0",
+ "cipher",
+ "cpufeatures 0.1.5",
  "zeroize",
 ]
 
 [[package]]
 name = "chacha20poly1305"
-version = "0.6.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9bf18d374d66df0c05cdddd528a7db98f78c28e2519b120855c4f84c5027b1f5"
+checksum = "1580317203210c517b6d44794abfbe600698276db18127e37ad3e69bf5e848e5"
 dependencies = [
  "aead",
  "chacha20",
+ "cipher",
  "poly1305",
- "stream-cipher",
  "zeroize",
 ]
 
@@ -1079,28 +1188,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff0e3bc0b6446b3f9663c1a6aba6ef06c5aeaa1bc92bd18077be337198ab9768"
 dependencies = [
  "multibase",
- "multihash",
+ "multihash 0.13.2",
  "unsigned-varint 0.5.1",
 ]
 
 [[package]]
 name = "cipher"
-version = "0.2.5"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801"
+checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7"
 dependencies = [
  "generic-array 0.14.4",
 ]
 
+[[package]]
+name = "ckb-merkle-mountain-range"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b"
+dependencies = [
+ "cfg-if 0.1.10",
+]
+
 [[package]]
 name = "clang-sys"
-version = "0.29.3"
+version = "1.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a"
+checksum = "10612c0ec0e0a1ff0e97980647cb058a6e7aedb913d01d009c406b8b7d0b26ee"
 dependencies = [
  "glob",
  "libc",
- "libloading",
+ "libloading 0.7.0",
 ]
 
 [[package]]
@@ -1149,22 +1267,22 @@ version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc"
 
+[[package]]
+name = "convert_case"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
+
 [[package]]
 name = "core-foundation"
-version = "0.7.0"
+version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171"
+checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62"
 dependencies = [
- "core-foundation-sys 0.7.0",
+ "core-foundation-sys",
  "libc",
 ]
 
-[[package]]
-name = "core-foundation-sys"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac"
-
 [[package]]
 name = "core-foundation-sys"
 version = "0.8.2"
@@ -1173,58 +1291,63 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b"
 
 [[package]]
 name = "cpp_demangle"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "44919ecaf6f99e8e737bc239408931c9a01e9a6c74814fee8242dd2506b65390"
+checksum = "8ea47428dc9d2237f3c6bc134472edfd63ebba0af932e783506dcfd66f10d18a"
 dependencies = [
  "cfg-if 1.0.0",
- "glob",
 ]
 
 [[package]]
-name = "cpuid-bool"
-version = "0.1.2"
+name = "cpufeatures"
+version = "0.1.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634"
+checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef"
+dependencies = [
+ "libc",
+]
 
 [[package]]
-name = "cpuid-bool"
-version = "0.2.0"
+name = "cpufeatures"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba"
+checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469"
+dependencies = [
+ "libc",
+]
 
 [[package]]
 name = "cranelift-bforest"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8ca3560686e7c9c7ed7e0fe77469f2410ba5d7781b1acaa9adc8d8deea28e3e"
+checksum = "7e6bea67967505247f54fa2c85cf4f6e0e31c4e5692c9b70e4ae58e339067333"
 dependencies = [
  "cranelift-entity",
 ]
 
 [[package]]
 name = "cranelift-codegen"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf9bf1ffffb6ce3d2e5ebc83549bd2436426c99b31cc550d521364cbe35d276"
+checksum = "48194035d2752bdd5bdae429e3ab88676e95f52a2b1355a5d4e809f9e39b1d74"
 dependencies = [
  "cranelift-bforest",
  "cranelift-codegen-meta",
  "cranelift-codegen-shared",
  "cranelift-entity",
- "gimli 0.24.0",
+ "gimli",
  "log",
  "regalloc",
  "serde",
- "smallvec 1.6.1",
+ "smallvec",
  "target-lexicon",
 ]
 
 [[package]]
 name = "cranelift-codegen-meta"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cc21936a5a6d07e23849ffe83e5c1f6f50305c074f4b2970ca50c13bf55b821"
+checksum = "976efb22fcab4f2cd6bd4e9913764616a54d895c1a23530128d04e03633c555f"
 dependencies = [
  "cranelift-codegen-shared",
  "cranelift-entity",
@@ -1232,57 +1355,58 @@ dependencies = [
 
 [[package]]
 name = "cranelift-codegen-shared"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca5b6ffaa87560bebe69a5446449da18090b126037920b0c1c6d5945f72faf6b"
+checksum = "9dabb5fe66e04d4652e434195b45ae65b5c8172d520247b8f66d8df42b2b45dc"
 dependencies = [
  "serde",
 ]
 
 [[package]]
 name = "cranelift-entity"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7d6b4a8bef04f82e4296782646f733c641d09497df2fabf791323fefaa44c64c"
+checksum = "3329733e4d4b8e91c809efcaa4faee80bf66f20164e3dd16d707346bd3494799"
 dependencies = [
  "serde",
 ]
 
 [[package]]
 name = "cranelift-frontend"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c31b783b351f966fce33e3c03498cb116d16d97a8f9978164a60920bd0d3a99c"
+checksum = "279afcc0d3e651b773f94837c3d581177b348c8d69e928104b2e9fccb226f921"
 dependencies = [
  "cranelift-codegen",
  "log",
- "smallvec 1.6.1",
+ "smallvec",
  "target-lexicon",
 ]
 
 [[package]]
 name = "cranelift-native"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a77c88d3dd48021ff1e37e978a00098524abd3513444ae252c08d37b310b3d2a"
+checksum = "4c04d1fe6a5abb5bb0edc78baa8ef238370fb8e389cc88b6d153f7c3e9680425"
 dependencies = [
  "cranelift-codegen",
+ "libc",
  "target-lexicon",
 ]
 
 [[package]]
 name = "cranelift-wasm"
-version = "0.74.0"
+version = "0.76.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edb6d408e2da77cdbbd65466298d44c86ae71c1785d2ab0d8657753cdb4d9d89"
+checksum = "e0d260ad44f6fd2c91f7f5097191a2a9e3edcbb36df1fb787b600dad5ea148ec"
 dependencies = [
  "cranelift-codegen",
  "cranelift-entity",
  "cranelift-frontend",
- "itertools 0.10.0",
+ "itertools",
  "log",
  "serde",
- "smallvec 1.6.1",
+ "smallvec",
  "thiserror",
  "wasmparser",
 ]
@@ -1298,93 +1422,44 @@ dependencies = [
 
 [[package]]
 name = "crossbeam-channel"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775"
+checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4"
 dependencies = [
  "cfg-if 1.0.0",
- "crossbeam-utils 0.8.3",
+ "crossbeam-utils",
 ]
 
 [[package]]
 name = "crossbeam-deque"
-version = "0.7.3"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285"
+checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
 dependencies = [
- "crossbeam-epoch 0.8.2",
- "crossbeam-utils 0.7.2",
- "maybe-uninit",
+ "cfg-if 1.0.0",
+ "crossbeam-epoch",
+ "crossbeam-utils",
 ]
 
 [[package]]
-name = "crossbeam-deque"
-version = "0.8.0"
+name = "crossbeam-epoch"
+version = "0.9.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9"
+checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd"
 dependencies = [
  "cfg-if 1.0.0",
- "crossbeam-epoch 0.9.3",
- "crossbeam-utils 0.8.3",
-]
-
-[[package]]
-name = "crossbeam-epoch"
-version = "0.8.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace"
-dependencies = [
- "autocfg",
- "cfg-if 0.1.10",
- "crossbeam-utils 0.7.2",
- "lazy_static",
- "maybe-uninit",
- "memoffset 0.5.6",
- "scopeguard",
-]
-
-[[package]]
-name = "crossbeam-epoch"
-version = "0.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12"
-dependencies = [
- "cfg-if 1.0.0",
- "crossbeam-utils 0.8.3",
- "lazy_static",
- "memoffset 0.6.1",
- "scopeguard",
-]
-
-[[package]]
-name = "crossbeam-queue"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570"
-dependencies = [
- "cfg-if 0.1.10",
- "crossbeam-utils 0.7.2",
- "maybe-uninit",
-]
-
-[[package]]
-name = "crossbeam-utils"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8"
-dependencies = [
- "autocfg",
- "cfg-if 0.1.10",
- "lazy_static",
+ "crossbeam-utils",
+ "lazy_static",
+ "memoffset",
+ "scopeguard",
 ]
 
 [[package]]
 name = "crossbeam-utils"
-version = "0.8.3"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49"
+checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db"
 dependencies = [
- "autocfg",
  "cfg-if 1.0.0",
  "lazy_static",
 ]
@@ -1397,43 +1472,52 @@ checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
 
 [[package]]
 name = "crypto-mac"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5"
+checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
 dependencies = [
- "generic-array 0.12.3",
- "subtle 1.0.0",
+ "generic-array 0.14.4",
+ "subtle",
 ]
 
 [[package]]
 name = "crypto-mac"
-version = "0.8.0"
+version = "0.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab"
+checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714"
 dependencies = [
  "generic-array 0.14.4",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
 name = "ct-logs"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c8e13110a84b6315df212c045be706af261fd364791cad863285439ebba672e"
+checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8"
 dependencies = [
  "sct",
 ]
 
 [[package]]
 name = "ctor"
-version = "0.1.19"
+version = "0.1.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8f45d9ad417bcef4817d614a501ab55cdd96a6fdb24f49aab89a54acfd66b19"
+checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa"
 dependencies = [
  "quote",
  "syn",
 ]
 
+[[package]]
+name = "ctr"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea"
+dependencies = [
+ "cipher",
+]
+
 [[package]]
 name = "cuckoofilter"
 version = "0.5.0"
@@ -1446,145 +1530,507 @@ dependencies = [
 ]
 
 [[package]]
-name = "curl"
-version = "0.4.35"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a872858e9cb9e3b96c80dd78774ad9e32e44d3b05dc31e142b858d14aebc82c"
+name = "cumulus-client-cli"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "curl-sys",
- "libc",
- "openssl-probe",
- "openssl-sys",
- "schannel",
- "socket2 0.3.19",
- "winapi 0.3.9",
+ "sc-cli",
+ "sc-service",
+ "structopt",
 ]
 
 [[package]]
-name = "curl-sys"
-version = "0.4.41+curl-7.75.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ec466abd277c7cab2905948f3e94d10bc4963f1f5d47921c1cc4ffd2028fe65"
+name = "cumulus-client-collator"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "cc",
- "libc",
- "libnghttp2-sys",
- "libz-sys",
- "openssl-sys",
- "pkg-config",
- "vcpkg",
- "winapi 0.3.9",
+ "cumulus-client-consensus-common",
+ "cumulus-client-network",
+ "cumulus-primitives-core",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "parking_lot 0.10.2",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "sc-client-api",
+ "sp-api",
+ "sp-consensus",
+ "sp-core",
+ "sp-runtime",
+ "tracing",
 ]
 
 [[package]]
-name = "curve25519-dalek"
-version = "2.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8"
+name = "cumulus-client-consensus-aura"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "byteorder",
- "digest 0.8.1",
- "rand_core 0.5.1",
- "subtle 2.4.0",
- "zeroize",
+ "async-trait",
+ "cumulus-client-consensus-common",
+ "cumulus-primitives-core",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "polkadot-client",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-consensus-aura",
+ "sc-consensus-slots",
+ "sc-telemetry",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-aura",
+ "sp-core",
+ "sp-inherents",
+ "sp-keystore",
+ "sp-runtime",
+ "substrate-prometheus-endpoint",
+ "tracing",
 ]
 
 [[package]]
-name = "curve25519-dalek"
-version = "3.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f"
+name = "cumulus-client-consensus-common"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "byteorder",
- "digest 0.9.0",
- "rand_core 0.5.1",
- "subtle 2.4.0",
- "zeroize",
+ "async-trait",
+ "dyn-clone",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "polkadot-primitives",
+ "sc-client-api",
+ "sc-consensus",
+ "sp-api",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-runtime",
+ "sp-trie",
+ "tracing",
 ]
 
 [[package]]
-name = "data-encoding"
-version = "2.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
-
-[[package]]
-name = "data-encoding-macro"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a94feec3d2ba66c0b6621bca8bc6f68415b1e5c69af3586fdd0af9fd9f29b17"
+name = "cumulus-client-network"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "data-encoding",
- "data-encoding-macro-internal",
+ "derive_more",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "parity-scale-codec",
+ "parking_lot 0.10.2",
+ "polkadot-client",
+ "polkadot-node-primitives",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "sc-client-api",
+ "sp-api",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-core",
+ "sp-runtime",
+ "tracing",
 ]
 
 [[package]]
-name = "data-encoding-macro-internal"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f0f83e699727abca3c56e187945f303389590305ab2f0185ea445aa66e8d5f2a"
+name = "cumulus-client-pov-recovery"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "data-encoding",
- "syn",
+ "cumulus-primitives-core",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "parity-scale-codec",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "sc-client-api",
+ "sc-consensus",
+ "sp-api",
+ "sp-consensus",
+ "sp-maybe-compressed-blob",
+ "sp-runtime",
+ "tracing",
 ]
 
 [[package]]
-name = "derive_more"
-version = "0.99.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c"
+name = "cumulus-client-service"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "cumulus-client-collator",
+ "cumulus-client-consensus-common",
+ "cumulus-client-pov-recovery",
+ "cumulus-primitives-core",
+ "parity-scale-codec",
+ "parking_lot 0.10.2",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "polkadot-service",
+ "sc-chain-spec",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-consensus-babe",
+ "sc-service",
+ "sc-telemetry",
+ "sc-tracing",
+ "sp-api",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-core",
+ "sp-runtime",
+ "tracing",
 ]
 
 [[package]]
-name = "digest"
-version = "0.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
+name = "cumulus-pallet-aura-ext"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "generic-array 0.12.3",
+ "frame-executive",
+ "frame-support",
+ "frame-system",
+ "pallet-aura",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-application-crypto",
+ "sp-consensus-aura",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "digest"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+name = "cumulus-pallet-dmp-queue"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "generic-array 0.14.4",
+ "cumulus-primitives-core",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+ "xcm",
+ "xcm-executor",
 ]
 
 [[package]]
-name = "directories"
-version = "3.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f"
+name = "cumulus-pallet-parachain-system"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "dirs-sys",
+ "cumulus-pallet-parachain-system-proc-macro",
+ "cumulus-primitives-core",
+ "cumulus-primitives-parachain-inherent",
+ "environmental",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-balances",
+ "parity-scale-codec",
+ "polkadot-parachain",
+ "scale-info",
+ "serde",
+ "sp-core",
+ "sp-externalities",
+ "sp-inherents",
+ "sp-io",
+ "sp-runtime",
+ "sp-state-machine",
+ "sp-std",
+ "sp-trie",
+ "sp-version",
+ "xcm",
 ]
 
 [[package]]
-name = "directories-next"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc"
+name = "cumulus-pallet-parachain-system-proc-macro"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "cfg-if 1.0.0",
- "dirs-sys-next",
+ "proc-macro-crate 1.1.0",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
-name = "dirs-sys"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a"
+name = "cumulus-pallet-xcm"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
 dependencies = [
- "libc",
- "redox_users 0.3.5",
- "winapi 0.3.9",
+ "cumulus-primitives-core",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+ "xcm",
+]
+
+[[package]]
+name = "cumulus-pallet-xcmp-queue"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "cumulus-primitives-core",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "rand_chacha 0.3.1",
+ "scale-info",
+ "sp-runtime",
+ "sp-std",
+ "xcm",
+ "xcm-executor",
+]
+
+[[package]]
+name = "cumulus-primitives-core"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "frame-support",
+ "impl-trait-for-tuples",
+ "parity-scale-codec",
+ "polkadot-core-primitives",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "sp-api",
+ "sp-runtime",
+ "sp-std",
+ "sp-trie",
+]
+
+[[package]]
+name = "cumulus-primitives-parachain-inherent"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "async-trait",
+ "cumulus-primitives-core",
+ "cumulus-test-relay-sproof-builder",
+ "parity-scale-codec",
+ "polkadot-client",
+ "sc-client-api",
+ "scale-info",
+ "sp-api",
+ "sp-core",
+ "sp-inherents",
+ "sp-runtime",
+ "sp-state-machine",
+ "sp-std",
+ "sp-trie",
+ "tracing",
+]
+
+[[package]]
+name = "cumulus-primitives-timestamp"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "cumulus-primitives-core",
+ "sp-inherents",
+ "sp-std",
+ "sp-timestamp",
+]
+
+[[package]]
+name = "cumulus-primitives-utility"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "cumulus-primitives-core",
+ "frame-support",
+ "parity-scale-codec",
+ "polkadot-core-primitives",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "sp-runtime",
+ "sp-std",
+ "sp-trie",
+ "xcm",
+]
+
+[[package]]
+name = "cumulus-test-relay-sproof-builder"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "cumulus-primitives-core",
+ "parity-scale-codec",
+ "polkadot-primitives",
+ "sp-runtime",
+ "sp-state-machine",
+ "sp-std",
+]
+
+[[package]]
+name = "curl"
+version = "0.4.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "003cb79c1c6d1c93344c7e1201bb51c2148f24ec2bd9c253709d6b2efb796515"
+dependencies = [
+ "curl-sys",
+ "libc",
+ "openssl-probe",
+ "openssl-sys",
+ "schannel",
+ "socket2 0.4.1",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "curl-sys"
+version = "0.4.45+curl-7.78.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "de9e5a72b1c744eb5dd20b2be4d7eb84625070bb5c4ab9b347b70464ab1e62eb"
+dependencies = [
+ "cc",
+ "libc",
+ "libnghttp2-sys",
+ "libz-sys",
+ "openssl-sys",
+ "pkg-config",
+ "vcpkg",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "curve25519-dalek"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216"
+dependencies = [
+ "byteorder",
+ "digest 0.8.1",
+ "rand_core 0.5.1",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "curve25519-dalek"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61"
+dependencies = [
+ "byteorder",
+ "digest 0.9.0",
+ "rand_core 0.5.1",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "data-encoding"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
+
+[[package]]
+name = "data-encoding-macro"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "86927b7cd2fe88fa698b87404b287ab98d1a0063a34071d92e575b72d3029aca"
+dependencies = [
+ "data-encoding",
+ "data-encoding-macro-internal",
+]
+
+[[package]]
+name = "data-encoding-macro-internal"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a5bbed42daaa95e780b60a50546aa345b8413a1e46f9a40a12907d3598f038db"
+dependencies = [
+ "data-encoding",
+ "syn",
+]
+
+[[package]]
+name = "derivative"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "derive_more"
+version = "0.99.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "40eebddd2156ce1bb37b20bbe5151340a31828b1f2d22ba4141f3531710e38df"
+dependencies = [
+ "convert_case",
+ "proc-macro2",
+ "quote",
+ "rustc_version 0.3.3",
+ "syn",
+]
+
+[[package]]
+name = "digest"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
+dependencies = [
+ "generic-array 0.12.4",
+]
+
+[[package]]
+name = "digest"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066"
+dependencies = [
+ "generic-array 0.14.4",
+]
+
+[[package]]
+name = "directories"
+version = "3.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7"
+dependencies = [
+ "dirs-sys",
+]
+
+[[package]]
+name = "directories-next"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc"
+dependencies = [
+ "cfg-if 1.0.0",
+ "dirs-sys-next",
+]
+
+[[package]]
+name = "dirs-sys"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780"
+dependencies = [
+ "libc",
+ "redox_users",
+ "winapi 0.3.9",
 ]
 
 [[package]]
@@ -1594,7 +2040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
 dependencies = [
  "libc",
- "redox_users 0.4.0",
+ "redox_users",
  "winapi 0.3.9",
 ]
 
@@ -1655,9 +2101,9 @@ checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf"
 
 [[package]]
 name = "ed25519"
-version = "1.0.3"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef"
+checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc"
 dependencies = [
  "signature",
 ]
@@ -1668,11 +2114,11 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d"
 dependencies = [
- "curve25519-dalek 3.0.2",
+ "curve25519-dalek 3.2.0",
  "ed25519",
  "rand 0.7.3",
  "serde",
- "sha2 0.9.3",
+ "sha2 0.9.8",
  "zeroize",
 ]
 
@@ -1703,6 +2149,37 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "enumflags2"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83c8d82922337cd23a15f88b70d8e4ef5f11da38dd7cdb55e84dd5de99695da0"
+dependencies = [
+ "enumflags2_derive",
+]
+
+[[package]]
+name = "enumflags2_derive"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "enumn"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e58b112d5099aa0857c5d05f0eacab86406dd8c0f85fe5d320a13256d29ecf4"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "env_logger"
 version = "0.7.1"
@@ -1718,9 +2195,22 @@ dependencies = [
 
 [[package]]
 name = "env_logger"
-version = "0.8.3"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3"
+dependencies = [
+ "atty",
+ "humantime 2.1.0",
+ "log",
+ "regex",
+ "termcolor",
+]
+
+[[package]]
+name = "env_logger"
+version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f"
+checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
 dependencies = [
  "atty",
  "humantime 2.1.0",
@@ -1737,9 +2227,9 @@ checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797"
 
 [[package]]
 name = "erased-serde"
-version = "0.3.13"
+version = "0.3.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c"
+checksum = "3de9ad4541d99dc22b59134e7ff8dc3d6c988c89ecd7324bf10a8362b07a2afa"
 dependencies = [
  "serde",
 ]
@@ -1768,7 +2258,7 @@ dependencies = [
 [[package]]
 name = "ethabi"
 version = "14.0.0"
-source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553"
+source = "git+https://github.com/svyatonik/ethabi.git?branch=bump-deps#19bb6ea4a8099af1d70ab8c0ddcd3dec8fa45ed8"
 dependencies = [
  "anyhow",
  "ethereum-types",
@@ -1783,12 +2273,12 @@ dependencies = [
 [[package]]
 name = "ethabi-contract"
 version = "11.0.0"
-source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553"
+source = "git+https://github.com/svyatonik/ethabi.git?branch=bump-deps#19bb6ea4a8099af1d70ab8c0ddcd3dec8fa45ed8"
 
 [[package]]
 name = "ethabi-derive"
 version = "14.0.0"
-source = "git+https://github.com/paritytech/ethabi.git?branch=td-eth-types-11#fe76a0547de3785e40215da7aa10b334e7a6e553"
+source = "git+https://github.com/svyatonik/ethabi.git?branch=bump-deps#19bb6ea4a8099af1d70ab8c0ddcd3dec8fa45ed8"
 dependencies = [
  "anyhow",
  "ethabi",
@@ -1844,47 +2334,41 @@ dependencies = [
 name = "ethereum-poa-relay"
 version = "0.1.0"
 dependencies = [
- "ansi_term 0.12.1",
+ "anyhow",
  "async-std",
  "async-trait",
  "bp-currency-exchange",
  "bp-eth-poa",
  "clap",
- "env_logger 0.8.3",
  "ethabi",
  "ethabi-contract",
  "ethabi-derive",
  "exchange-relay",
- "frame-system",
- "futures 0.3.13",
+ "futures 0.3.17",
  "headers-relay",
  "hex",
- "hex-literal 0.3.1",
- "libsecp256k1",
+ "hex-literal 0.3.3",
+ "libsecp256k1 0.7.0",
  "log",
- "messages-relay",
  "num-traits",
- "pallet-transaction-payment",
  "parity-scale-codec",
  "relay-ethereum-client",
  "relay-rialto-client",
  "relay-substrate-client",
  "relay-utils",
  "rialto-runtime",
- "serde",
  "serde_json",
  "sp-core",
  "sp-keyring",
  "sp-runtime",
- "substrate-prometheus-endpoint",
- "time 0.2.25",
+ "thiserror",
 ]
 
 [[package]]
 name = "ethereum-types"
-version = "0.11.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f64b5df66a228d85e4b17e5d6c6aa43b0310898ffe8a85988c4c032357aaabfd"
+checksum = "0dd6bde671199089e601e8d47e153368b893ef885f11f365a3261ec58153c211"
 dependencies = [
  "ethbloom 0.11.0",
  "fixed-hash",
@@ -1904,14 +2388,16 @@ checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59"
 name = "exchange-relay"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
  "async-std",
  "async-trait",
  "backoff",
- "futures 0.3.13",
+ "futures 0.3.17",
  "log",
  "num-traits",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "relay-utils",
+ "thiserror",
 ]
 
 [[package]]
@@ -1920,29 +2406,7 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5"
 dependencies = [
- "futures 0.3.13",
-]
-
-[[package]]
-name = "failure"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86"
-dependencies = [
- "backtrace",
- "failure_derive",
-]
-
-[[package]]
-name = "failure_derive"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn",
- "synstructure",
+ "futures 0.3.17",
 ]
 
 [[package]]
@@ -1959,9 +2423,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
 
 [[package]]
 name = "fastrand"
-version = "1.4.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3"
+checksum = "b394ed3d285a429378d3b384b9eb1285267e7df4b166df24b7a6939a04dc392e"
 dependencies = [
  "instant",
 ]
@@ -1987,17 +2451,18 @@ dependencies = [
 
 [[package]]
 name = "finality-grandpa"
-version = "0.14.1"
+version = "0.14.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74a1bfdcc776e63e49f741c7ce6116fa1b887e8ac2e3ccb14dd4aa113e54feb9"
+checksum = "e8ac3ff5224ef91f3c97e03eb1de2db82743427e91aaa5ac635f454f0b164f5a"
 dependencies = [
  "either",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "log",
  "num-traits",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
+ "scale-info",
 ]
 
 [[package]]
@@ -2008,11 +2473,11 @@ dependencies = [
  "async-trait",
  "backoff",
  "bp-header-chain",
- "futures 0.3.13",
+ "futures 0.3.17",
  "headers-relay",
  "log",
  "num-traits",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "relay-utils",
 ]
 
@@ -2023,7 +2488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c"
 dependencies = [
  "byteorder",
- "rand 0.8.3",
+ "rand 0.8.4",
  "rustc-hex",
  "static_assertions",
 ]
@@ -2036,9 +2501,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
 
 [[package]]
 name = "flate2"
-version = "1.0.20"
+version = "1.0.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0"
+checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f"
 dependencies = [
  "cfg-if 1.0.0",
  "crc32fast",
@@ -2048,27 +2513,30 @@ dependencies = [
 ]
 
 [[package]]
-name = "flume"
-version = "0.10.2"
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "531a685ab99b8f60a271b44d5dd1a76e55124a8c9fa0407b7a8e9cd172d5b588"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
 dependencies = [
- "futures-core",
- "futures-sink",
- "pin-project 1.0.5",
- "spinning_top",
+ "foreign-types-shared",
 ]
 
 [[package]]
-name = "fnv"
-version = "1.0.7"
+name = "foreign-types-shared"
+version = "0.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
 
 [[package]]
 name = "fork-tree"
 version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
 ]
@@ -2085,15 +2553,16 @@ dependencies = [
 
 [[package]]
 name = "frame-benchmarking"
-version = "3.1.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support",
  "frame-system",
  "linregress",
  "log",
  "parity-scale-codec",
- "paste 1.0.4",
+ "paste",
+ "scale-info",
  "sp-api",
  "sp-io",
  "sp-runtime",
@@ -2104,13 +2573,16 @@ dependencies = [
 
 [[package]]
 name = "frame-benchmarking-cli"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "Inflector",
  "chrono",
  "frame-benchmarking",
+ "frame-support",
  "handlebars",
+ "linked-hash-map",
+ "log",
  "parity-scale-codec",
  "sc-cli",
  "sc-client-db",
@@ -2125,14 +2597,29 @@ dependencies = [
  "structopt",
 ]
 
+[[package]]
+name = "frame-election-provider-support"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-arithmetic",
+ "sp-npos-elections",
+ "sp-std",
+]
+
 [[package]]
 name = "frame-executive"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support",
  "frame-system",
  "parity-scale-codec",
+ "scale-info",
  "sp-core",
  "sp-io",
  "sp-runtime",
@@ -2142,31 +2629,32 @@ dependencies = [
 
 [[package]]
 name = "frame-metadata"
-version = "13.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "14.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96616f82e069102b95a72c87de4c84d2f87ef7f0f20630e78ce3824436483110"
 dependencies = [
+ "cfg-if 1.0.0",
  "parity-scale-codec",
+ "scale-info",
  "serde",
- "sp-core",
- "sp-std",
 ]
 
 [[package]]
 name = "frame-support"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "bitflags",
  "frame-metadata",
  "frame-support-procedural",
  "impl-trait-for-tuples",
  "log",
- "max-encoded-len",
  "once_cell",
  "parity-scale-codec",
- "paste 1.0.4",
+ "paste",
+ "scale-info",
  "serde",
- "smallvec 1.6.1",
+ "smallvec",
  "sp-arithmetic",
  "sp-core",
  "sp-inherents",
@@ -2180,8 +2668,8 @@ dependencies = [
 
 [[package]]
 name = "frame-support-procedural"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "Inflector",
  "frame-support-procedural-tools",
@@ -2192,11 +2680,11 @@ dependencies = [
 
 [[package]]
 name = "frame-support-procedural-tools"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support-procedural-tools-derive",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -2205,7 +2693,7 @@ dependencies = [
 [[package]]
 name = "frame-support-procedural-tools-derive"
 version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -2214,13 +2702,13 @@ dependencies = [
 
 [[package]]
 name = "frame-system"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support",
- "impl-trait-for-tuples",
  "log",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-core",
  "sp-io",
@@ -2229,15 +2717,47 @@ dependencies = [
  "sp-version",
 ]
 
+[[package]]
+name = "frame-system-benchmarking"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "frame-system-rpc-runtime-api"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
  "sp-api",
 ]
 
+[[package]]
+name = "frame-try-runtime"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-support",
+ "sp-api",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "fs-err"
+version = "2.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ebd3504ad6116843b8375ad70df74e7bfe83cac77a1f3fe73200c844d43bfe0"
+
 [[package]]
 name = "fs-swap"
 version = "0.2.6"
@@ -2246,7 +2766,7 @@ checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5"
 dependencies = [
  "lazy_static",
  "libc",
- "libloading",
+ "libloading 0.5.2",
  "winapi 0.3.9",
 ]
 
@@ -2260,12 +2780,6 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
-[[package]]
-name = "fuchsia-cprng"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba"
-
 [[package]]
 name = "fuchsia-zircon"
 version = "0.3.3"
@@ -2296,9 +2810,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678"
 
 [[package]]
 name = "futures"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f55667319111d593ba876406af7c409c0ebb44dc4be6132a783ccf163ea14c1"
+checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca"
 dependencies = [
  "futures-channel",
  "futures-core",
@@ -2311,9 +2825,9 @@ dependencies = [
 
 [[package]]
 name = "futures-channel"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c2dd2df839b57db9ab69c2c9d8f3e8c81984781937fe2807dc6dcf3b2ad2939"
+checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888"
 dependencies = [
  "futures-core",
  "futures-sink",
@@ -2321,25 +2835,15 @@ dependencies = [
 
 [[package]]
 name = "futures-core"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "15496a72fabf0e62bdc3df11a59a3787429221dd0710ba8ef163d6f7a9112c94"
-
-[[package]]
-name = "futures-cpupool"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"
-dependencies = [
- "futures 0.1.31",
- "num_cpus",
-]
+checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d"
 
 [[package]]
 name = "futures-executor"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "891a4b7b96d84d5940084b2a37632dd65deeae662c114ceaa2c879629c9c0ad1"
+checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c"
 dependencies = [
  "futures-core",
  "futures-task",
@@ -2349,31 +2853,32 @@ dependencies = [
 
 [[package]]
 name = "futures-io"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d71c2c65c57704c32f5241c1223167c2c3294fd34ac020c807ddbe6db287ba59"
+checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377"
 
 [[package]]
 name = "futures-lite"
-version = "1.11.3"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb"
+checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48"
 dependencies = [
  "fastrand",
  "futures-core",
  "futures-io",
  "memchr",
  "parking",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
  "waker-fn",
 ]
 
 [[package]]
 name = "futures-macro"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea405816a5139fb39af82c2beb921d52143f556038378d6db21183a5c37fbfb7"
+checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb"
 dependencies = [
+ "autocfg",
  "proc-macro-hack",
  "proc-macro2",
  "quote",
@@ -2387,21 +2892,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3a1387e07917c711fb4ee4f48ea0adb04a3c9739e53ef85bf43ae1edc2937a8b"
 dependencies = [
  "futures-io",
- "rustls 0.19.0",
- "webpki 0.21.4",
+ "rustls",
+ "webpki",
 ]
 
 [[package]]
 name = "futures-sink"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85754d98985841b7d4f5e8e6fbfa4a4ac847916893ec511a2917ccd8525b8bb3"
+checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11"
 
 [[package]]
 name = "futures-task"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa189ef211c15ee602667a6fcfe1c1fd9e07d42250d2156382820fba33c9df80"
+checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99"
 
 [[package]]
 name = "futures-timer"
@@ -2417,10 +2922,11 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c"
 
 [[package]]
 name = "futures-util"
-version = "0.3.13"
+version = "0.3.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1812c7ab8aedf8d6f2701a43e1243acdbcc2b36ab26e2ad421eb99ac963d96d1"
+checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481"
 dependencies = [
+ "autocfg",
  "futures 0.1.31",
  "futures-channel",
  "futures-core",
@@ -2429,7 +2935,7 @@ dependencies = [
  "futures-sink",
  "futures-task",
  "memchr",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
  "pin-utils",
  "proc-macro-hack",
  "proc-macro-nested",
@@ -2444,18 +2950,9 @@ checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2"
 
 [[package]]
 name = "generic-array"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec"
-dependencies = [
- "typenum",
-]
-
-[[package]]
-name = "generic-array"
-version = "0.13.2"
+version = "0.12.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd"
+checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd"
 dependencies = [
  "typenum",
 ]
@@ -2477,15 +2974,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
 dependencies = [
  "cfg-if 1.0.0",
+ "js-sys",
  "libc",
  "wasi 0.9.0+wasi-snapshot-preview1",
+ "wasm-bindgen",
 ]
 
 [[package]]
 name = "getrandom"
-version = "0.2.2"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8"
+checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
 dependencies = [
  "cfg-if 1.0.0",
  "libc",
@@ -2494,9 +2993,9 @@ dependencies = [
 
 [[package]]
 name = "ghash"
-version = "0.3.1"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97304e4cd182c3846f7575ced3890c53012ce534ad9114046b0a9e00bb30a375"
+checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99"
 dependencies = [
  "opaque-debug 0.3.0",
  "polyval",
@@ -2504,15 +3003,9 @@ dependencies = [
 
 [[package]]
 name = "gimli"
-version = "0.23.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce"
-
-[[package]]
-name = "gimli"
-version = "0.24.0"
+version = "0.25.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189"
+checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
 dependencies = [
  "fallible-iterator",
  "indexmap",
@@ -2527,9 +3020,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
 
 [[package]]
 name = "globset"
-version = "0.4.6"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c152169ef1e421390738366d2f796655fec62621dabbd0fd476f905934061e4a"
+checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd"
 dependencies = [
  "aho-corasick",
  "bstr",
@@ -2553,52 +3046,33 @@ dependencies = [
 
 [[package]]
 name = "h2"
-version = "0.1.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462"
-dependencies = [
- "byteorder",
- "bytes 0.4.12",
- "fnv",
- "futures 0.1.31",
- "http 0.1.21",
- "indexmap",
- "log",
- "slab",
- "string",
- "tokio-io",
-]
-
-[[package]]
-name = "h2"
-version = "0.2.7"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e4728fd124914ad25e99e3d15a9361a879f6620f63cb56bbb08f95abb97a535"
+checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472"
 dependencies = [
- "bytes 0.5.6",
+ "bytes 1.1.0",
  "fnv",
  "futures-core",
  "futures-sink",
  "futures-util",
- "http 0.2.3",
+ "http",
  "indexmap",
  "slab",
- "tokio 0.2.25",
+ "tokio",
  "tokio-util",
  "tracing",
- "tracing-futures",
 ]
 
 [[package]]
 name = "handlebars"
-version = "3.5.3"
+version = "3.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb0867bbc5a3da37a753e78021d5fcf8a4db00e18dd2dd90fd36e24190e162d"
+checksum = "4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3"
 dependencies = [
  "log",
  "pest",
  "pest_derive",
- "quick-error 2.0.0",
+ "quick-error 2.0.1",
  "serde",
  "serde_json",
 ]
@@ -2620,13 +3094,38 @@ dependencies = [
 
 [[package]]
 name = "hashbrown"
-version = "0.9.1"
+version = "0.11.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
 dependencies = [
  "ahash",
 ]
 
+[[package]]
+name = "headers"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0b7591fb62902706ae8e7aaff416b1b0fa2c0fd0878b46dc13baa3712d8a855"
+dependencies = [
+ "base64 0.13.0",
+ "bitflags",
+ "bytes 1.1.0",
+ "headers-core",
+ "http",
+ "mime",
+ "sha-1 0.9.8",
+ "time 0.1.44",
+]
+
+[[package]]
+name = "headers-core"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429"
+dependencies = [
+ "http",
+]
+
 [[package]]
 name = "headers-relay"
 version = "0.1.0"
@@ -2634,28 +3133,28 @@ dependencies = [
  "async-std",
  "async-trait",
  "backoff",
- "futures 0.3.13",
+ "futures 0.3.17",
  "linked-hash-map",
  "log",
  "num-traits",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "relay-utils",
 ]
 
 [[package]]
 name = "heck"
-version = "0.3.2"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac"
+checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c"
 dependencies = [
  "unicode-segmentation",
 ]
 
 [[package]]
 name = "hermit-abi"
-version = "0.1.18"
+version = "0.1.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
 dependencies = [
  "libc",
 ]
@@ -2678,9 +3177,9 @@ dependencies = [
 
 [[package]]
 name = "hex-literal"
-version = "0.3.1"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8"
+checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b"
 
 [[package]]
 name = "hex-literal-impl"
@@ -2699,33 +3198,33 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f"
 
 [[package]]
 name = "hmac"
-version = "0.7.1"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695"
+checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840"
 dependencies = [
- "crypto-mac 0.7.0",
- "digest 0.8.1",
+ "crypto-mac 0.8.0",
+ "digest 0.9.0",
 ]
 
 [[package]]
 name = "hmac"
-version = "0.8.1"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840"
+checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b"
 dependencies = [
- "crypto-mac 0.8.0",
+ "crypto-mac 0.11.1",
  "digest 0.9.0",
 ]
 
 [[package]]
 name = "hmac-drbg"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b"
+checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1"
 dependencies = [
- "digest 0.8.1",
- "generic-array 0.12.3",
- "hmac 0.7.1",
+ "digest 0.9.0",
+ "generic-array 0.14.4",
+ "hmac 0.8.1",
 ]
 
 [[package]]
@@ -2752,59 +3251,37 @@ dependencies = [
 
 [[package]]
 name = "http"
-version = "0.1.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6ccf5ede3a895d8856620237b2f02972c1bbc78d2965ad7fe8838d4a0ed41f0"
-dependencies = [
- "bytes 0.4.12",
- "fnv",
- "itoa",
-]
-
-[[package]]
-name = "http"
-version = "0.2.3"
+version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747"
+checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "fnv",
  "itoa",
 ]
 
 [[package]]
 name = "http-body"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d"
-dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "http 0.1.21",
- "tokio-buf",
-]
-
-[[package]]
-name = "http-body"
-version = "0.3.1"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b"
+checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5"
 dependencies = [
- "bytes 0.5.6",
- "http 0.2.3",
+ "bytes 1.1.0",
+ "http",
+ "pin-project-lite 0.2.7",
 ]
 
 [[package]]
 name = "httparse"
-version = "1.3.5"
+version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "615caabe2c3160b313d52ccc905335f4ed5f10881dd63dc5699d47e90be85691"
+checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503"
 
 [[package]]
 name = "httpdate"
-version = "0.3.2"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47"
+checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440"
 
 [[package]]
 name = "humantime"
@@ -2823,74 +3300,56 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
 [[package]]
 name = "hyper"
-version = "0.12.36"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c843caf6296fc1f93444735205af9ed4e109a539005abb2564ae1d6fad34c52"
-dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "futures-cpupool",
- "h2 0.1.26",
- "http 0.1.21",
- "http-body 0.1.0",
- "httparse",
- "iovec",
- "itoa",
- "log",
- "net2",
- "rustc_version",
- "time 0.1.44",
- "tokio 0.1.22",
- "tokio-buf",
- "tokio-executor",
- "tokio-io",
- "tokio-reactor",
- "tokio-tcp",
- "tokio-threadpool",
- "tokio-timer",
- "want 0.2.0",
-]
-
-[[package]]
-name = "hyper"
-version = "0.13.10"
+version = "0.14.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb"
+checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd"
 dependencies = [
- "bytes 0.5.6",
+ "bytes 1.1.0",
  "futures-channel",
  "futures-core",
  "futures-util",
- "h2 0.2.7",
- "http 0.2.3",
- "http-body 0.3.1",
+ "h2",
+ "http",
+ "http-body",
  "httparse",
  "httpdate",
  "itoa",
- "pin-project 1.0.5",
- "socket2 0.3.19",
- "tokio 0.2.25",
+ "pin-project-lite 0.2.7",
+ "socket2 0.4.1",
+ "tokio",
  "tower-service",
  "tracing",
- "want 0.3.0",
+ "want",
 ]
 
 [[package]]
 name = "hyper-rustls"
-version = "0.21.0"
+version = "0.22.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37743cc83e8ee85eacfce90f2f4102030d9ff0a95244098d781e9bee4a90abb6"
+checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64"
 dependencies = [
- "bytes 0.5.6",
  "ct-logs",
  "futures-util",
- "hyper 0.13.10",
+ "hyper",
  "log",
- "rustls 0.18.1",
+ "rustls",
  "rustls-native-certs",
- "tokio 0.2.25",
+ "tokio",
  "tokio-rustls",
- "webpki 0.21.4",
+ "webpki",
+]
+
+[[package]]
+name = "hyper-tls"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905"
+dependencies = [
+ "bytes 1.1.0",
+ "hyper",
+ "native-tls",
+ "tokio",
+ "tokio-native-tls",
 ]
 
 [[package]]
@@ -2906,9 +3365,9 @@ dependencies = [
 
 [[package]]
 name = "idna"
-version = "0.2.2"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21"
+checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8"
 dependencies = [
  "matches",
  "unicode-bidi",
@@ -2917,9 +3376,9 @@ dependencies = [
 
 [[package]]
 name = "if-addrs"
-version = "0.6.5"
+version = "0.6.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48"
+checksum = "c9a83ec4af652890ac713ffd8dc859e650420a5ef47f7b9be29b6664ab50fbc8"
 dependencies = [
  "if-addrs-sys",
  "libc",
@@ -2938,12 +3397,12 @@ dependencies = [
 
 [[package]]
 name = "if-watch"
-version = "0.2.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a6d52908d4ea4ab2bc22474ba149bf1011c8e2c3ebc1ff593ae28ac44f494b6"
+checksum = "ae8ab7f67bad3240049cb24fb9cb0b4c2c6af4c245840917fbbdededeee91179"
 dependencies = [
  "async-io",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-lite",
  "if-addrs",
  "ipnet",
@@ -2954,9 +3413,9 @@ dependencies = [
 
 [[package]]
 name = "impl-codec"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed"
+checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443"
 dependencies = [
  "parity-scale-codec",
 ]
@@ -2992,9 +3451,9 @@ dependencies = [
 
 [[package]]
 name = "indexmap"
-version = "1.6.1"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb1fa934250de4de8aef298d81c729a7d33d8c239daa3a7575e6b92bfc7313b"
+checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5"
 dependencies = [
  "autocfg",
  "hashbrown",
@@ -3003,13 +3462,19 @@ dependencies = [
 
 [[package]]
 name = "instant"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec"
+checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d"
 dependencies = [
  "cfg-if 1.0.0",
 ]
 
+[[package]]
+name = "integer-encoding"
+version = "1.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f"
+
 [[package]]
 name = "integer-sqrt"
 version = "0.1.5"
@@ -3025,7 +3490,7 @@ version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 2.0.2",
 ]
 
@@ -3040,9 +3505,9 @@ dependencies = [
 
 [[package]]
 name = "ip_network"
-version = "0.3.4"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ee15951c035f79eddbef745611ec962f63f4558f1dadf98ab723cc603487c6f"
+checksum = "09b746553d2f4a1ca26fab939943ddfb217a091f34f53571620a8e3d30691303"
 
 [[package]]
 name = "ipconfig"
@@ -3053,28 +3518,29 @@ dependencies = [
  "socket2 0.3.19",
  "widestring",
  "winapi 0.3.9",
- "winreg",
+ "winreg 0.6.2",
 ]
 
 [[package]]
 name = "ipnet"
-version = "2.3.0"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135"
+checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9"
 
 [[package]]
 name = "isahc"
-version = "1.2.0"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "33b24d2aed6bbe6faeab0e164ec2e9e6193fcfcfe489b6eb59fb0d0d34947d73"
+checksum = "431445cb4ba85a80cb1438a9ae8042dadb78ae4046ecee89ad027b614aa0ddb7"
 dependencies = [
- "crossbeam-utils 0.8.3",
+ "async-channel",
+ "crossbeam-utils",
  "curl",
  "curl-sys",
  "encoding_rs",
- "flume",
+ "event-listener",
  "futures-lite",
- "http 0.2.3",
+ "http",
  "log",
  "mime",
  "once_cell",
@@ -3083,48 +3549,39 @@ dependencies = [
  "sluice",
  "tracing",
  "tracing-futures",
- "url 2.2.1",
+ "url 2.2.2",
  "waker-fn",
 ]
 
 [[package]]
 name = "itertools"
-version = "0.9.0"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b"
+checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf"
 dependencies = [
  "either",
 ]
 
 [[package]]
-name = "itertools"
-version = "0.10.0"
+name = "itoa"
+version = "0.4.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37d572918e350e82412fe766d24b15e6682fb2ed2bbe018280caa810397cb319"
-dependencies = [
- "either",
-]
-
-[[package]]
-name = "itoa"
-version = "0.4.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736"
+checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4"
 
 [[package]]
 name = "jobserver"
-version = "0.1.21"
+version = "0.1.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
+checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "js-sys"
-version = "0.3.50"
+version = "0.3.54"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c"
+checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254"
 dependencies = [
  "wasm-bindgen",
 ]
@@ -3144,13 +3601,13 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-client-transports"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7"
+checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a"
 dependencies = [
- "failure",
- "futures 0.1.31",
- "jsonrpc-core 15.1.0",
+ "derive_more",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-pubsub",
  "log",
  "serde",
@@ -3160,11 +3617,13 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-core"
-version = "15.1.0"
+version = "17.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa"
+checksum = "d4467ab6dfa369b69e52bd0692e480c4d117410538526a57a304a0f2250fd95e"
 dependencies = [
- "futures 0.1.31",
+ "futures 0.3.17",
+ "futures-executor",
+ "futures-util",
  "log",
  "serde",
  "serde_derive",
@@ -3173,11 +3632,13 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-core"
-version = "17.0.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07569945133257ff557eb37b015497104cea61a2c9edaf126c1cbd6e8332397f"
+checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
+ "futures-executor",
+ "futures-util",
  "log",
  "serde",
  "serde_derive",
@@ -3186,18 +3647,19 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-core-client"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db"
+checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0"
 dependencies = [
+ "futures 0.3.17",
  "jsonrpc-client-transports",
 ]
 
 [[package]]
 name = "jsonrpc-derive"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3"
+checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2"
 dependencies = [
  "proc-macro-crate 0.1.5",
  "proc-macro2",
@@ -3207,84 +3669,92 @@ dependencies = [
 
 [[package]]
 name = "jsonrpc-http-server"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7"
+checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff"
 dependencies = [
- "hyper 0.12.36",
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "hyper",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-server-utils",
  "log",
  "net2",
- "parking_lot 0.10.2",
+ "parking_lot 0.11.2",
  "unicase",
 ]
 
 [[package]]
 name = "jsonrpc-ipc-server"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf50e53e4eea8f421a7316c5f63e395f7bc7c4e786a6dc54d76fab6ff7aa7ce7"
+checksum = "382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845"
 dependencies = [
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-server-utils",
  "log",
  "parity-tokio-ipc",
- "parking_lot 0.10.2",
- "tokio-service",
+ "parking_lot 0.11.2",
+ "tower-service",
 ]
 
 [[package]]
 name = "jsonrpc-pubsub"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77"
+checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011"
 dependencies = [
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
+ "lazy_static",
  "log",
- "parking_lot 0.10.2",
+ "parking_lot 0.11.2",
  "rand 0.7.3",
  "serde",
 ]
 
 [[package]]
 name = "jsonrpc-server-utils"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176"
+checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4"
 dependencies = [
- "bytes 0.4.12",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "globset",
- "jsonrpc-core 15.1.0",
+ "jsonrpc-core 18.0.0",
  "lazy_static",
  "log",
- "tokio 0.1.22",
- "tokio-codec",
+ "tokio",
+ "tokio-stream",
+ "tokio-util",
  "unicase",
 ]
 
 [[package]]
 name = "jsonrpc-ws-server"
-version = "15.1.0"
+version = "18.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22"
+checksum = "f892c7d766369475ab7b0669f417906302d7c0fb521285c0a0c92e52e7c8e946"
 dependencies = [
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-server-utils",
  "log",
  "parity-ws",
- "parking_lot 0.10.2",
+ "parking_lot 0.11.2",
  "slab",
 ]
 
 [[package]]
 name = "jsonrpsee-proc-macros"
-version = "0.2.0-alpha.6"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5784ee8bb31988fa2c7a755fe31b0e21aa51894a67e5c99b6d4470f0253bf31a"
+checksum = "8edb341d35279b59c79d7fe9e060a51aec29d45af99cc7c72ea7caa350fa71a4"
 dependencies = [
  "Inflector",
- "proc-macro-crate 1.0.0",
+ "bae",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -3292,40 +3762,44 @@ dependencies = [
 
 [[package]]
 name = "jsonrpsee-types"
-version = "0.2.0-alpha.6"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bab3dabceeeeb865897661d532d47202eaae71cd2c606f53cb69f1fbc0555a51"
+checksum = "4cc738fd55b676ada3271ef7c383a14a0867a2a88b0fa941311bf5fc0a29d498"
 dependencies = [
  "async-trait",
  "beef",
  "futures-channel",
  "futures-util",
+ "hyper",
  "log",
  "serde",
  "serde_json",
+ "soketto 0.6.0",
  "thiserror",
 ]
 
 [[package]]
 name = "jsonrpsee-ws-client"
-version = "0.2.0-alpha.6"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6fdb4390bd25358c62e8b778652a564a1723ba07dca0feb3da439c2253fe59f"
+checksum = "9841352dbecf4c2ed5dc71698df9f1660262ae4e0b610e968602529bdbcf7b30"
 dependencies = [
- "async-std",
- "async-tls",
  "async-trait",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "jsonrpsee-types",
  "log",
- "pin-project 1.0.5",
+ "pin-project 1.0.8",
+ "rustls",
+ "rustls-native-certs",
  "serde",
  "serde_json",
- "soketto",
+ "soketto 0.6.0",
  "thiserror",
- "url 2.2.1",
- "webpki 0.22.0",
+ "tokio",
+ "tokio-rustls",
+ "tokio-util",
+ "url 2.2.2",
 ]
 
 [[package]]
@@ -3355,30 +3829,48 @@ dependencies = [
 
 [[package]]
 name = "kvdb"
-version = "0.9.0"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8891bd853eff90e33024195d79d578dc984c82f9e0715fcd2b525a0c19d52811"
+checksum = "45a3f58dc069ec0e205a27f5b45920722a46faed802a0541538241af6228f512"
 dependencies = [
  "parity-util-mem",
- "smallvec 1.6.1",
+ "smallvec",
 ]
 
 [[package]]
 name = "kvdb-memorydb"
-version = "0.9.0"
+version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30a0da8e08caf08d384a620ec19bb6c9b85c84137248e202617fb91881f25912"
+checksum = "c3b6b85fc643f5acd0bffb2cc8a6d150209379267af0d41db72170021841f9f5"
 dependencies = [
  "kvdb",
  "parity-util-mem",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
 ]
 
 [[package]]
 name = "kvdb-rocksdb"
-version = "0.11.0"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d169dbb316aa0fa185d02d847c047f1aa20e292cf1563d790c13536a2a732c8"
+dependencies = [
+ "fs-swap",
+ "kvdb",
+ "log",
+ "num_cpus",
+ "owning_ref",
+ "parity-util-mem",
+ "parking_lot 0.11.2",
+ "regex",
+ "rocksdb",
+ "smallvec",
+]
+
+[[package]]
+name = "kvdb-rocksdb"
+version = "0.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34446c373ccc494c2124439281c198c7636ccdc2752c06722bbffd56d459c1e4"
+checksum = "9b1b6ea8f2536f504b645ad78419c8246550e19d2c3419a167080ce08edee35a"
 dependencies = [
  "fs-swap",
  "kvdb",
@@ -3386,10 +3878,10 @@ dependencies = [
  "num_cpus",
  "owning_ref",
  "parity-util-mem",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "regex",
  "rocksdb",
- "smallvec 1.6.1",
+ "smallvec",
 ]
 
 [[package]]
@@ -3404,17 +3896,11 @@ version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
-[[package]]
-name = "leb128"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a"
-
 [[package]]
 name = "libc"
-version = "0.2.97"
+version = "0.2.103"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12b8adadd720df158f4d70dfe7ccc6adb0472d7c55ca83445f6a5ab3e36f8fb6"
+checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6"
 
 [[package]]
 name = "libloading"
@@ -3426,6 +3912,16 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
+[[package]]
+name = "libloading"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a"
+dependencies = [
+ "cfg-if 1.0.0",
+ "winapi 0.3.9",
+]
+
 [[package]]
 name = "libm"
 version = "0.2.1"
@@ -3444,13 +3940,13 @@ dependencies = [
 
 [[package]]
 name = "libp2p"
-version = "0.37.1"
+version = "0.39.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08053fbef67cd777049ef7a95ebaca2ece370b4ed7712c3fa404d69a88cb741b"
+checksum = "9004c06878ef8f3b4b4067e69a140d87ed20bf777287f82223e49713b36ee433"
 dependencies = [
  "atomic",
- "bytes 1.0.1",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "lazy_static",
  "libp2p-core",
  "libp2p-deflate",
@@ -3474,41 +3970,41 @@ dependencies = [
  "libp2p-wasm-ext",
  "libp2p-websocket",
  "libp2p-yamux",
- "parity-multiaddr",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
- "smallvec 1.6.1",
+ "multiaddr",
+ "parking_lot 0.11.2",
+ "pin-project 1.0.8",
+ "smallvec",
  "wasm-timer",
 ]
 
 [[package]]
 name = "libp2p-core"
-version = "0.28.3"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "554d3e7e9e65f939d66b75fd6a4c67f258fe250da61b91f46c545fc4a89b51d9"
+checksum = "af9b4abdeaa420593a297c8592f63fad4234f4b88dc9343b8fd8e736c35faa59"
 dependencies = [
  "asn1_der",
  "bs58",
  "ed25519-dalek",
  "either",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "lazy_static",
- "libsecp256k1",
+ "libsecp256k1 0.5.0",
  "log",
- "multihash",
+ "multiaddr",
+ "multihash 0.14.0",
  "multistream-select",
- "parity-multiaddr",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
+ "parking_lot 0.11.2",
+ "pin-project 1.0.8",
  "prost",
  "prost-build",
  "rand 0.7.3",
  "ring",
  "rw-stream-sink",
- "sha2 0.9.3",
- "smallvec 1.6.1",
+ "sha2 0.9.8",
+ "smallvec",
  "thiserror",
  "unsigned-varint 0.7.0",
  "void",
@@ -3517,59 +4013,59 @@ dependencies = [
 
 [[package]]
 name = "libp2p-deflate"
-version = "0.28.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08"
+checksum = "66097fccc0b7f8579f90a03ea76ba6196332ea049fd07fd969490a06819dcdc8"
 dependencies = [
  "flate2",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
 ]
 
 [[package]]
 name = "libp2p-dns"
-version = "0.28.1"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62e63dab8b5ff35e0c101a3e51e843ba782c07bbb1682f5fd827622e0d02b98b"
+checksum = "58ff08b3196b85a17f202d80589e93b1660a574af67275706657fdc762e42c32"
 dependencies = [
  "async-std-resolver",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "log",
- "smallvec 1.6.1",
+ "smallvec",
  "trust-dns-resolver",
 ]
 
 [[package]]
 name = "libp2p-floodsub"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48a9b570f6766301d9c4aa00fce3554cad1598e2f466debbc4dde909028417cf"
+checksum = "404eca8720967179dac7a5b4275eb91f904a53859c69ca8d018560ad6beb214f"
 dependencies = [
  "cuckoofilter",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "libp2p-swarm",
  "log",
  "prost",
  "prost-build",
  "rand 0.7.3",
- "smallvec 1.6.1",
+ "smallvec",
 ]
 
 [[package]]
 name = "libp2p-gossipsub"
-version = "0.30.1"
+version = "0.32.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7b0c8506a6ec3344b9e706d7c7a6dba826f8ede735cfe13dde12a8c263c4af9"
+checksum = "b1cc48709bcbc3a3321f08a73560b4bbb4166a7d56f6fdb615bc775f4f91058e"
 dependencies = [
  "asynchronous-codec 0.6.0",
  "base64 0.13.0",
  "byteorder",
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "hex_fmt",
  "libp2p-core",
  "libp2p-swarm",
@@ -3578,48 +4074,48 @@ dependencies = [
  "prost-build",
  "rand 0.7.3",
  "regex",
- "sha2 0.9.3",
- "smallvec 1.6.1",
+ "sha2 0.9.8",
+ "smallvec",
  "unsigned-varint 0.7.0",
  "wasm-timer",
 ]
 
 [[package]]
 name = "libp2p-identify"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f668f00efd9883e8b7bcc582eaf0164615792608f886f6577da18bcbeea0a46"
+checksum = "a7b61f6cf07664fb97016c318c4d4512b3dd4cc07238607f3f0163245f99008e"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "libp2p-swarm",
  "log",
  "prost",
  "prost-build",
- "smallvec 1.6.1",
+ "smallvec",
  "wasm-timer",
 ]
 
 [[package]]
 name = "libp2p-kad"
-version = "0.30.0"
+version = "0.31.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b07312ebe5ee4fd2404447a0609814574df55c65d4e20838b957bbd34907d820"
+checksum = "50ed78489c87924235665a0ab345b298ee34dff0f7ad62c0ba6608b2144fb75e"
 dependencies = [
  "arrayvec 0.5.2",
  "asynchronous-codec 0.6.0",
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "either",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "libp2p-swarm",
  "log",
  "prost",
  "prost-build",
  "rand 0.7.3",
- "sha2 0.9.3",
- "smallvec 1.6.1",
+ "sha2 0.9.8",
+ "smallvec",
  "uint",
  "unsigned-varint 0.7.0",
  "void",
@@ -3628,59 +4124,59 @@ dependencies = [
 
 [[package]]
 name = "libp2p-mdns"
-version = "0.30.1"
+version = "0.31.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41e282f974c4bea56db8acca50387f05189406e346318cb30190b0bde662961e"
+checksum = "a29e6cbc2a24b8471b6567e580a0e8e7b70a6d0f0ea2be0844d1e842d7d4fa33"
 dependencies = [
  "async-io",
  "data-encoding",
  "dns-parser",
- "futures 0.3.13",
+ "futures 0.3.17",
  "if-watch",
  "lazy_static",
  "libp2p-core",
  "libp2p-swarm",
  "log",
- "rand 0.8.3",
- "smallvec 1.6.1",
- "socket2 0.4.0",
+ "rand 0.8.4",
+ "smallvec",
+ "socket2 0.4.1",
  "void",
 ]
 
 [[package]]
 name = "libp2p-mplex"
-version = "0.28.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85e9b544335d1ed30af71daa96edbefadef6f19c7a55f078b9fc92c87163105d"
+checksum = "313d9ea526c68df4425f580024e67a9d3ffd49f2c33de5154b1f5019816f7a99"
 dependencies = [
  "asynchronous-codec 0.6.0",
- "bytes 1.0.1",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "libp2p-core",
  "log",
  "nohash-hasher",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "rand 0.7.3",
- "smallvec 1.6.1",
+ "smallvec",
  "unsigned-varint 0.7.0",
 ]
 
 [[package]]
 name = "libp2p-noise"
-version = "0.30.0"
+version = "0.32.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "36db0f0db3b0433f5b9463f1c0cd9eadc0a3734a9170439ce501ff99733a88bd"
+checksum = "3f1db7212f342b6ba7c981cc40e31f76e9e56cb48e65fa4c142ecaca5839523e"
 dependencies = [
- "bytes 1.0.1",
- "curve25519-dalek 3.0.2",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "curve25519-dalek 3.2.0",
+ "futures 0.3.17",
  "lazy_static",
  "libp2p-core",
  "log",
  "prost",
  "prost-build",
- "rand 0.7.3",
- "sha2 0.9.3",
+ "rand 0.8.4",
+ "sha2 0.9.8",
  "snow",
  "static_assertions",
  "x25519-dalek",
@@ -3689,11 +4185,11 @@ dependencies = [
 
 [[package]]
 name = "libp2p-ping"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf4bfaffac63bf3c7ec11ed9d8879d455966ddea7e78ee14737f0b6dce0d1cd1"
+checksum = "2482cfd9eb0b7a0baaf3e7b329dc4f2785181a161b1a47b7192f8d758f54a439"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "libp2p-swarm",
  "log",
@@ -3704,13 +4200,13 @@ dependencies = [
 
 [[package]]
 name = "libp2p-plaintext"
-version = "0.28.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c8c37b4d2a075b4be8442760a5f8c037180f0c8dd5b5734b9978ab868b3aa11"
+checksum = "13b4783e5423870b9a5c199f65a7a3bc66d86ab56b2b9beebf3c338d889cf8e4"
 dependencies = [
  "asynchronous-codec 0.6.0",
- "bytes 1.0.1",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "libp2p-core",
  "log",
  "prost",
@@ -3721,13 +4217,13 @@ dependencies = [
 
 [[package]]
 name = "libp2p-pnet"
-version = "0.20.0"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ce3374f3b28162db9d3442c9347c4f14cb01e8290052615c7d341d40eae0599"
+checksum = "07cb4dd4b917e5b40ddefe49b96b07adcd8d342e0317011d175b7b2bb1dcc974"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "log",
- "pin-project 1.0.5",
+ "pin-project 1.0.8",
  "rand 0.7.3",
  "salsa20",
  "sha3",
@@ -3735,22 +4231,22 @@ dependencies = [
 
 [[package]]
 name = "libp2p-relay"
-version = "0.2.0"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b8786aca3f18671d8776289706a5521f6c9124a820f69e358de214b9939440d"
+checksum = "0133f6cfd81cdc16e716de2982e012c62e6b9d4f12e41967b3ee361051c622aa"
 dependencies = [
  "asynchronous-codec 0.6.0",
- "bytes 1.0.1",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "libp2p-core",
  "libp2p-swarm",
  "log",
- "pin-project 1.0.5",
+ "pin-project 1.0.8",
  "prost",
  "prost-build",
  "rand 0.7.3",
- "smallvec 1.6.1",
+ "smallvec",
  "unsigned-varint 0.7.0",
  "void",
  "wasm-timer",
@@ -3758,45 +4254,45 @@ dependencies = [
 
 [[package]]
 name = "libp2p-request-response"
-version = "0.11.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1cdbe172f08e6d0f95fa8634e273d4c4268c4063de2e33e7435194b0130c62e3"
+checksum = "06cdae44b6821466123af93cbcdec7c9e6ba9534a8af9cdc296446d39416d241"
 dependencies = [
  "async-trait",
- "bytes 1.0.1",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "libp2p-core",
  "libp2p-swarm",
  "log",
- "lru",
+ "lru 0.6.6",
  "minicbor",
  "rand 0.7.3",
- "smallvec 1.6.1",
+ "smallvec",
  "unsigned-varint 0.7.0",
  "wasm-timer",
 ]
 
 [[package]]
 name = "libp2p-swarm"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e04d8e1eef675029ec728ba14e8d0da7975d84b6679b699b4ae91a1de9c3a92"
+checksum = "7083861341e1555467863b4cd802bea1e8c4787c0f7b5110097d0f1f3248f9a9"
 dependencies = [
  "either",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "log",
  "rand 0.7.3",
- "smallvec 1.6.1",
+ "smallvec",
  "void",
  "wasm-timer",
 ]
 
 [[package]]
 name = "libp2p-swarm-derive"
-version = "0.23.0"
+version = "0.24.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "365b0a699fea5168676840567582a012ea297b1ca02eee467e58301b9c9c5eed"
+checksum = "ab8cb308d4fc854869f5abb54fdab0833d2cf670d407c745849dc47e6e08d79c"
 dependencies = [
  "quote",
  "syn",
@@ -3804,40 +4300,40 @@ dependencies = [
 
 [[package]]
 name = "libp2p-tcp"
-version = "0.28.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b1a27d21c477951799e99d5c105d78868258502ce092988040a808d5a19bbd9"
+checksum = "79edd26b6b4bb5feee210dcda562dca186940dfecb0024b979c3f50824b3bf28"
 dependencies = [
  "async-io",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "if-watch",
  "ipnet",
  "libc",
  "libp2p-core",
  "log",
- "socket2 0.4.0",
+ "socket2 0.4.1",
 ]
 
 [[package]]
 name = "libp2p-uds"
-version = "0.28.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5"
+checksum = "280e793440dd4e9f273d714f4497325c72cddb0fe85a49f9a03c88f41dd20182"
 dependencies = [
  "async-std",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
  "log",
 ]
 
 [[package]]
 name = "libp2p-wasm-ext"
-version = "0.28.1"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cef45d61e43c313531b5e903e4e8415212ff6338e0c54c47da5b9b412b5760de"
+checksum = "f553b7140fad3d7a76f50497b0ea591e26737d9607428a75509fc191e4d1b1f6"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "js-sys",
  "libp2p-core",
  "parity-send-wrapper",
@@ -3847,40 +4343,40 @@ dependencies = [
 
 [[package]]
 name = "libp2p-websocket"
-version = "0.29.0"
+version = "0.30.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cace60995ef6f637e4752cccbb2590f6bc358e8741a0d066307636c69a4b3a74"
+checksum = "ddf99dcbf5063e9d59087f61b1e85c686ceab2f5abedb472d32288065c0e5e27"
 dependencies = [
  "either",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-rustls",
  "libp2p-core",
  "log",
  "quicksink",
  "rw-stream-sink",
- "soketto",
- "url 2.2.1",
+ "soketto 0.4.2",
+ "url 2.2.2",
  "webpki-roots",
 ]
 
 [[package]]
 name = "libp2p-yamux"
-version = "0.32.0"
+version = "0.33.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f35da42cfc6d5cb0dcf3ad6881bc68d146cdf38f98655e09e33fbba4d13eabc4"
+checksum = "214cc0dd9c37cbed27f0bb1eba0c41bbafdb93a8be5e9d6ae1e6b4b42cd044bf"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p-core",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "thiserror",
  "yamux",
 ]
 
 [[package]]
 name = "librocksdb-sys"
-version = "6.11.4"
+version = "6.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09"
+checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d"
 dependencies = [
  "bindgen",
  "cc",
@@ -3890,25 +4386,124 @@ dependencies = [
 
 [[package]]
 name = "libsecp256k1"
-version = "0.3.5"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962"
+checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7"
 dependencies = [
  "arrayref",
- "crunchy",
- "digest 0.8.1",
+ "base64 0.12.3",
+ "digest 0.9.0",
  "hmac-drbg",
+ "libsecp256k1-core 0.2.2",
+ "libsecp256k1-gen-ecmult 0.2.1",
+ "libsecp256k1-gen-genmult 0.2.1",
  "rand 0.7.3",
- "sha2 0.8.2",
- "subtle 2.4.0",
+ "serde",
+ "sha2 0.9.8",
+ "typenum",
+]
+
+[[package]]
+name = "libsecp256k1"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73"
+dependencies = [
+ "arrayref",
+ "base64 0.12.3",
+ "digest 0.9.0",
+ "hmac-drbg",
+ "libsecp256k1-core 0.2.2",
+ "libsecp256k1-gen-ecmult 0.2.1",
+ "libsecp256k1-gen-genmult 0.2.1",
+ "rand 0.7.3",
+ "serde",
+ "sha2 0.9.8",
+ "typenum",
+]
+
+[[package]]
+name = "libsecp256k1"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37"
+dependencies = [
+ "arrayref",
+ "base64 0.13.0",
+ "digest 0.9.0",
+ "hmac-drbg",
+ "libsecp256k1-core 0.3.0",
+ "libsecp256k1-gen-ecmult 0.3.0",
+ "libsecp256k1-gen-genmult 0.3.0",
+ "rand 0.8.4",
+ "serde",
+ "sha2 0.9.8",
  "typenum",
 ]
 
+[[package]]
+name = "libsecp256k1-core"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0f6ab710cec28cef759c5f18671a27dae2a5f952cdaaee1d8e2908cb2478a80"
+dependencies = [
+ "crunchy",
+ "digest 0.9.0",
+ "subtle",
+]
+
+[[package]]
+name = "libsecp256k1-core"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451"
+dependencies = [
+ "crunchy",
+ "digest 0.9.0",
+ "subtle",
+]
+
+[[package]]
+name = "libsecp256k1-gen-ecmult"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3"
+dependencies = [
+ "libsecp256k1-core 0.2.2",
+]
+
+[[package]]
+name = "libsecp256k1-gen-ecmult"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809"
+dependencies = [
+ "libsecp256k1-core 0.3.0",
+]
+
+[[package]]
+name = "libsecp256k1-gen-genmult"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d"
+dependencies = [
+ "libsecp256k1-core 0.2.2",
+]
+
+[[package]]
+name = "libsecp256k1-gen-genmult"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c"
+dependencies = [
+ "libsecp256k1-core 0.3.0",
+]
+
 [[package]]
 name = "libz-sys"
-version = "1.1.2"
+version = "1.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "602113192b08db8f38796c4e85c39e960c145965140e918018bcde1952429655"
+checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66"
 dependencies = [
  "cc",
  "libc",
@@ -3933,9 +4528,9 @@ dependencies = [
 
 [[package]]
 name = "linregress"
-version = "0.4.0"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724"
+checksum = "d6c601a85f5ecd1aba625247bca0031585fb1c446461b142878a16f8245ddeb8"
 dependencies = [
  "nalgebra",
  "statrs",
@@ -3952,9 +4547,9 @@ dependencies = [
 
 [[package]]
 name = "lock_api"
-version = "0.4.2"
+version = "0.4.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312"
+checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109"
 dependencies = [
  "scopeguard",
 ]
@@ -3971,9 +4566,18 @@ dependencies = [
 
 [[package]]
 name = "lru"
-version = "0.6.5"
+version = "0.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ea2d928b485416e8908cff2d97d621db22b27f7b3b6729e438bcf42c671ba91"
+dependencies = [
+ "hashbrown",
+]
+
+[[package]]
+name = "lru"
+version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f374d42cdfc1d7dbf3d3dec28afab2eb97ffbf43a3234d795b5986dbf4b90ba"
+checksum = "6c748cfe47cb8da225c37595b3108bea1c198c84aaae8ea0ba76d01dda9fc803"
 dependencies = [
  "hashbrown",
 ]
@@ -3987,6 +4591,26 @@ dependencies = [
  "linked-hash-map",
 ]
 
+[[package]]
+name = "lz4"
+version = "1.23.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aac20ed6991e01bf6a2e68cc73df2b389707403662a8ba89f68511fb340f724c"
+dependencies = [
+ "libc",
+ "lz4-sys",
+]
+
+[[package]]
+name = "lz4-sys"
+version = "1.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dca79aa95d8b3226213ad454d328369853be3a1382d89532a854f4d69640acae"
+dependencies = [
+ "cc",
+ "libc",
+]
+
 [[package]]
 name = "mach"
 version = "0.3.2"
@@ -4019,52 +4643,24 @@ dependencies = [
 
 [[package]]
 name = "matches"
-version = "0.1.8"
+version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
+checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f"
 
 [[package]]
 name = "matrixmultiply"
-version = "0.2.4"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "916806ba0031cd542105d916a97c8572e1fa6dd79c9c51e7eb43a09ec2dd84c1"
+checksum = "5a8a15b776d9dfaecd44b03c5828c2199cddff5247215858aac14624f8d6b741"
 dependencies = [
  "rawpointer",
 ]
 
-[[package]]
-name = "max-encoded-len"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
-dependencies = [
- "impl-trait-for-tuples",
- "max-encoded-len-derive",
- "parity-scale-codec",
- "primitive-types",
-]
-
-[[package]]
-name = "max-encoded-len-derive"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
-dependencies = [
- "proc-macro-crate 1.0.0",
- "proc-macro2",
- "quote",
- "syn",
-]
-
-[[package]]
-name = "maybe-uninit"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
-
 [[package]]
 name = "memchr"
-version = "2.3.4"
+version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
 
 [[package]]
 name = "memmap"
@@ -4078,40 +4674,40 @@ dependencies = [
 
 [[package]]
 name = "memmap2"
-version = "0.2.1"
+version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04e3e85b970d650e2ae6d70592474087051c11c54da7f7b4949725c5735fbcc6"
+checksum = "723e3ebdcdc5c023db1df315364573789f8857c11b631a2fdfad7c00f5c046b4"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "memoffset"
-version = "0.5.6"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa"
+checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9"
 dependencies = [
  "autocfg",
 ]
 
 [[package]]
-name = "memoffset"
-version = "0.6.1"
+name = "memory-db"
+version = "0.27.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "157b4208e3059a8f9e78d559edc658e13df41410cb3ae03979c83130067fdd87"
+checksum = "de006e09d04fc301a5f7e817b75aa49801c4479a8af753764416b085337ddcc5"
 dependencies = [
- "autocfg",
+ "hash-db",
+ "hashbrown",
+ "parity-util-mem",
 ]
 
 [[package]]
-name = "memory-db"
-version = "0.26.0"
+name = "memory-lru"
+version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "814bbecfc0451fc314eeea34f05bbcd5b98a7ad7af37faee088b86a1e633f1d4"
+checksum = "beeb98b3d1ed2c0054bd81b5ba949a0243c3ccad751d45ea898fa8059fa2860a"
 dependencies = [
- "hash-db",
- "hashbrown",
- "parity-util-mem",
+ "lru 0.6.6",
 ]
 
 [[package]]
@@ -4140,24 +4736,46 @@ dependencies = [
  "async-trait",
  "bp-messages",
  "bp-runtime",
- "futures 0.3.13",
+ "futures 0.3.17",
  "hex",
  "log",
  "num-traits",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "relay-utils",
 ]
 
+[[package]]
+name = "metered-channel"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "derive_more",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "mick-jaeger"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c023c3f16109e7f33aa451f773fd61070e265b4977d0b6e344a51049296dd7df"
+dependencies = [
+ "futures 0.3.17",
+ "rand 0.7.3",
+ "thrift",
+]
+
 [[package]]
 name = "millau-bridge-node"
 version = "0.1.0"
 dependencies = [
- "bp-messages",
  "bp-millau",
  "bp-runtime",
  "frame-benchmarking",
  "frame-benchmarking-cli",
- "jsonrpc-core 15.1.0",
+ "jsonrpc-core 18.0.0",
  "millau-runtime",
  "node-inspect",
  "pallet-bridge-messages",
@@ -4180,7 +4798,6 @@ dependencies = [
  "sp-consensus-aura",
  "sp-core",
  "sp-finality-grandpa",
- "sp-inherents",
  "sp-runtime",
  "sp-timestamp",
  "structopt",
@@ -4199,16 +4816,18 @@ dependencies = [
  "bp-runtime",
  "bp-westend",
  "bridge-runtime-common",
+ "frame-benchmarking",
  "frame-executive",
  "frame-support",
  "frame-system",
  "frame-system-rpc-runtime-api",
- "hex-literal 0.3.1",
+ "hex-literal 0.3.3",
  "pallet-aura",
  "pallet-balances",
  "pallet-bridge-dispatch",
  "pallet-bridge-grandpa",
  "pallet-bridge-messages",
+ "pallet-bridge-token-swap",
  "pallet-grandpa",
  "pallet-randomness-collective-flip",
  "pallet-session",
@@ -4218,6 +4837,7 @@ dependencies = [
  "pallet-transaction-payment",
  "pallet-transaction-payment-rpc-runtime-api",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-api",
  "sp-block-builder",
@@ -4243,18 +4863,18 @@ checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d"
 
 [[package]]
 name = "minicbor"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ea79ce4ab9f445ec6b71833a2290ac0a29c9dde0fa7cae4c481eecae021d9bd9"
+checksum = "51aa5bb0ca22415daca596a227b507f880ad1b2318a87fa9325312a5d285ca0d"
 dependencies = [
  "minicbor-derive",
 ]
 
 [[package]]
 name = "minicbor-derive"
-version = "0.6.2"
+version = "0.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19ce18b5423c573a13e80cb3046ea0af6379ef725dc3af4886bdb8f4e5093068"
+checksum = "54999f917cd092b13904737e26631aa2b2b88d625db68e4bab461dcd8006c788"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -4291,38 +4911,28 @@ dependencies = [
 ]
 
 [[package]]
-name = "mio-extras"
-version = "2.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19"
-dependencies = [
- "lazycell",
- "log",
- "mio",
- "slab",
-]
-
-[[package]]
-name = "mio-named-pipes"
-version = "0.1.7"
+name = "mio"
+version = "0.7.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656"
+checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16"
 dependencies = [
+ "libc",
  "log",
- "mio",
- "miow 0.3.6",
+ "miow 0.3.7",
+ "ntapi",
  "winapi 0.3.9",
 ]
 
 [[package]]
-name = "mio-uds"
-version = "0.6.8"
+name = "mio-extras"
+version = "2.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0"
+checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19"
 dependencies = [
- "iovec",
- "libc",
- "mio",
+ "lazycell",
+ "log",
+ "mio 0.6.23",
+ "slab",
 ]
 
 [[package]]
@@ -4339,11 +4949,10 @@ dependencies = [
 
 [[package]]
 name = "miow"
-version = "0.3.6"
+version = "0.3.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897"
+checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21"
 dependencies = [
- "socket2 0.3.19",
  "winapi 0.3.9",
 ]
 
@@ -4353,6 +4962,24 @@ version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238"
 
+[[package]]
+name = "multiaddr"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48ee4ea82141951ac6379f964f71b20876d43712bea8faf6dd1a375e08a46499"
+dependencies = [
+ "arrayref",
+ "bs58",
+ "byteorder",
+ "data-encoding",
+ "multihash 0.14.0",
+ "percent-encoding 2.1.0",
+ "serde",
+ "static_assertions",
+ "unsigned-varint 0.7.0",
+ "url 2.2.2",
+]
+
 [[package]]
 name = "multibase"
 version = "0.8.0"
@@ -4376,19 +5003,32 @@ dependencies = [
  "digest 0.9.0",
  "generic-array 0.14.4",
  "multihash-derive",
- "sha2 0.9.3",
+ "sha2 0.9.8",
  "sha3",
  "unsigned-varint 0.5.1",
 ]
 
+[[package]]
+name = "multihash"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "752a61cd890ff691b4411423d23816d5866dd5621e4d1c5687a53b94b5a979d8"
+dependencies = [
+ "digest 0.9.0",
+ "generic-array 0.14.4",
+ "multihash-derive",
+ "sha2 0.9.8",
+ "unsigned-varint 0.7.0",
+]
+
 [[package]]
 name = "multihash-derive"
-version = "0.7.1"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85ee3c48cb9d9b275ad967a0e96715badc13c6029adb92f34fa17b9ff28fd81f"
+checksum = "424f6e86263cd5294cbd7f1e95746b95aca0e0d66bff31e5a40d6baa87b4aa99"
 dependencies = [
- "proc-macro-crate 0.1.5",
- "proc-macro-error",
+ "proc-macro-crate 1.1.0",
+ "proc-macro-error 1.0.4",
  "proc-macro2",
  "quote",
  "syn",
@@ -4397,59 +5037,78 @@ dependencies = [
 
 [[package]]
 name = "multimap"
-version = "0.8.2"
+version = "0.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1255076139a83bb467426e7f8d0134968a8118844faa755985e077cf31850333"
+checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a"
 
 [[package]]
 name = "multistream-select"
-version = "0.10.1"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5df70763c86c98487451f307e1b68b4100da9076f4c12146905fc2054277f4e8"
+checksum = "7d91ec0a2440aaff5f78ec35631a7027d50386c6163aa975f7caa0d5da4b6ff8"
 dependencies = [
- "bytes 1.0.1",
- "futures 0.3.13",
+ "bytes 1.1.0",
+ "futures 0.3.17",
  "log",
- "pin-project 1.0.5",
- "smallvec 1.6.1",
+ "pin-project 1.0.8",
+ "smallvec",
  "unsigned-varint 0.7.0",
 ]
 
 [[package]]
 name = "nalgebra"
-version = "0.21.1"
+version = "0.27.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486"
+checksum = "462fffe4002f4f2e1f6a9dcf12cc1a6fc0e15989014efc02a941d3e0f5dc2120"
 dependencies = [
  "approx",
- "generic-array 0.13.2",
  "matrixmultiply",
+ "nalgebra-macros",
  "num-complex",
- "num-rational",
+ "num-rational 0.4.0",
  "num-traits",
- "rand 0.7.3",
+ "rand 0.8.4",
  "rand_distr",
  "simba",
  "typenum",
 ]
 
+[[package]]
+name = "nalgebra-macros"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01fcc0b8149b4632adc89ac3b7b31a12fb6099a0317a4eb2ebff574ef7de7218"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "names"
-version = "0.11.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da"
+checksum = "10a8690bf09abf659851e58cd666c3d37ac6af07c2bd7a9e332cfba471715775"
 dependencies = [
- "rand 0.3.23",
+ "rand 0.8.4",
 ]
 
 [[package]]
-name = "nb-connect"
-version = "1.0.3"
+name = "native-tls"
+version = "0.2.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "670361df1bc2399ee1ff50406a0d422587dd3bb0da596e1978fe8e05dabddf4f"
+checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d"
 dependencies = [
+ "lazy_static",
  "libc",
- "socket2 0.3.19",
+ "log",
+ "openssl",
+ "openssl-probe",
+ "openssl-sys",
+ "schannel",
+ "security-framework",
+ "security-framework-sys",
+ "tempfile",
 ]
 
 [[package]]
@@ -4465,14 +5124,14 @@ dependencies = [
 
 [[package]]
 name = "node-inspect"
-version = "0.8.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.9.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "derive_more",
- "log",
  "parity-scale-codec",
  "sc-cli",
  "sc-client-api",
+ "sc-executor",
  "sc-service",
  "sp-blockchain",
  "sp-core",
@@ -4494,10 +5153,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451"
 
 [[package]]
 name = "nom"
-version = "5.1.2"
+version = "6.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af"
+checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2"
 dependencies = [
+ "bitvec 0.19.5",
+ "funty",
  "memchr",
  "version_check",
 ]
@@ -4524,11 +5185,10 @@ dependencies = [
 
 [[package]]
 name = "num-complex"
-version = "0.2.4"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95"
+checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085"
 dependencies = [
- "autocfg",
  "num-traits",
 ]
 
@@ -4564,6 +5224,17 @@ dependencies = [
  "num-traits",
 ]
 
+[[package]]
+name = "num-rational"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a"
+dependencies = [
+ "autocfg",
+ "num-integer",
+ "num-traits",
+]
+
 [[package]]
 name = "num-traits"
 version = "0.2.14"
@@ -4586,28 +5257,20 @@ dependencies = [
 
 [[package]]
 name = "object"
-version = "0.23.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4"
-
-[[package]]
-name = "object"
-version = "0.24.0"
+version = "0.26.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170"
+checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2"
 dependencies = [
  "crc32fast",
  "indexmap",
+ "memchr",
 ]
 
 [[package]]
 name = "once_cell"
-version = "1.7.0"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10acf907b94fc1b1a152d08ef97e7759650268cf986bf127f387e602b02c7e5a"
-dependencies = [
- "parking_lot 0.11.1",
-]
+checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
 
 [[package]]
 name = "opaque-debug"
@@ -4621,17 +5284,31 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"
 
+[[package]]
+name = "openssl"
+version = "0.10.36"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d9facdb76fec0b73c406f125d44d86fdad818d66fef0531eec9233ca425ff4a"
+dependencies = [
+ "bitflags",
+ "cfg-if 1.0.0",
+ "foreign-types",
+ "libc",
+ "once_cell",
+ "openssl-sys",
+]
+
 [[package]]
 name = "openssl-probe"
-version = "0.1.2"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de"
+checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a"
 
 [[package]]
 name = "openssl-sys"
-version = "0.9.61"
+version = "0.9.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f"
+checksum = "1996d2d305e561b70d1ee0c53f1542833f4e1ac6ce9a6708b6ff2738ca67dc82"
 dependencies = [
  "autocfg",
  "cc",
@@ -4640,6 +5317,15 @@ dependencies = [
  "vcpkg",
 ]
 
+[[package]]
+name = "ordered-float"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "owning_ref"
 version = "0.4.1"
@@ -4651,45 +5337,159 @@ dependencies = [
 
 [[package]]
 name = "pallet-aura"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support",
  "frame-system",
- "pallet-session",
  "pallet-timestamp",
  "parity-scale-codec",
+ "scale-info",
  "sp-application-crypto",
  "sp-consensus-aura",
  "sp-runtime",
  "sp-std",
 ]
 
+[[package]]
+name = "pallet-authority-discovery"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "pallet-session",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-application-crypto",
+ "sp-authority-discovery",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "pallet-authorship"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support",
  "frame-system",
  "impl-trait-for-tuples",
  "parity-scale-codec",
+ "scale-info",
  "sp-authorship",
  "sp-runtime",
  "sp-std",
 ]
 
+[[package]]
+name = "pallet-babe"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-authorship",
+ "pallet-session",
+ "pallet-timestamp",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-application-crypto",
+ "sp-consensus-babe",
+ "sp-consensus-vrf",
+ "sp-io",
+ "sp-runtime",
+ "sp-session",
+ "sp-staking",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-bags-list"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-election-provider-support",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "pallet-balances"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-benchmarking",
  "frame-support",
  "frame-system",
  "log",
- "max-encoded-len",
  "parity-scale-codec",
+ "scale-info",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-beefy"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "beefy-primitives",
+ "frame-support",
+ "frame-system",
+ "pallet-session",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-beefy-mmr"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "beefy-merkle-tree",
+ "beefy-primitives",
+ "frame-support",
+ "frame-system",
+ "hex",
+ "libsecp256k1 0.7.0",
+ "log",
+ "pallet-beefy",
+ "pallet-mmr",
+ "pallet-mmr-primitives",
+ "pallet-session",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-bounties"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-treasury",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
  "sp-runtime",
  "sp-std",
 ]
@@ -4705,6 +5505,7 @@ dependencies = [
  "frame-system",
  "log",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-core",
  "sp-io",
@@ -4722,7 +5523,7 @@ dependencies = [
  "frame-system",
  "log",
  "parity-scale-codec",
- "serde",
+ "scale-info",
  "sp-core",
  "sp-io",
  "sp-runtime",
@@ -4737,10 +5538,11 @@ dependencies = [
  "frame-benchmarking",
  "frame-support",
  "frame-system",
- "hex-literal 0.3.1",
- "libsecp256k1",
+ "hex-literal 0.3.3",
+ "libsecp256k1 0.7.0",
  "log",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-io",
  "sp-runtime",
@@ -4761,7 +5563,9 @@ dependencies = [
  "log",
  "num-traits",
  "parity-scale-codec",
+ "scale-info",
  "serde",
+ "sp-core",
  "sp-finality-grandpa",
  "sp-io",
  "sp-runtime",
@@ -4773,20 +5577,20 @@ dependencies = [
 name = "pallet-bridge-messages"
 version = "0.1.0"
 dependencies = [
- "bitvec",
+ "bitvec 0.20.4",
  "bp-message-dispatch",
  "bp-messages",
- "bp-rialto",
  "bp-runtime",
  "frame-benchmarking",
  "frame-support",
  "frame-system",
  "hex",
- "hex-literal 0.3.1",
+ "hex-literal 0.3.3",
  "log",
  "num-traits",
  "pallet-balances",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-core",
  "sp-io",
@@ -4795,611 +5599,2100 @@ dependencies = [
 ]
 
 [[package]]
-name = "pallet-grandpa"
-version = "3.1.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-bridge-token-swap"
+version = "0.1.0"
 dependencies = [
+ "bp-message-dispatch",
+ "bp-messages",
+ "bp-runtime",
+ "bp-token-swap",
  "frame-benchmarking",
  "frame-support",
  "frame-system",
  "log",
- "pallet-authorship",
- "pallet-session",
+ "pallet-balances",
+ "pallet-bridge-dispatch",
+ "pallet-bridge-messages",
  "parity-scale-codec",
- "sp-application-crypto",
+ "scale-info",
+ "serde",
  "sp-core",
- "sp-finality-grandpa",
  "sp-io",
  "sp-runtime",
- "sp-session",
- "sp-staking",
  "sp-std",
 ]
 
 [[package]]
-name = "pallet-randomness-collective-flip"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-collective"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
+ "frame-benchmarking",
  "frame-support",
  "frame-system",
+ "log",
  "parity-scale-codec",
- "safe-mix",
+ "scale-info",
+ "sp-core",
+ "sp-io",
  "sp-runtime",
  "sp-std",
 ]
 
 [[package]]
-name = "pallet-session"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-democracy"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
+ "frame-benchmarking",
  "frame-support",
  "frame-system",
- "impl-trait-for-tuples",
- "log",
- "pallet-timestamp",
  "parity-scale-codec",
- "sp-core",
+ "scale-info",
+ "serde",
  "sp-io",
  "sp-runtime",
- "sp-session",
- "sp-staking",
  "sp-std",
- "sp-trie",
 ]
 
 [[package]]
-name = "pallet-shift-session-manager"
-version = "0.1.0"
+name = "pallet-election-provider-multi-phase"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
+ "frame-election-provider-support",
  "frame-support",
  "frame-system",
- "pallet-session",
+ "log",
  "parity-scale-codec",
- "serde",
+ "scale-info",
+ "sp-arithmetic",
  "sp-core",
+ "sp-io",
+ "sp-npos-elections",
  "sp-runtime",
- "sp-staking",
  "sp-std",
+ "static_assertions",
 ]
 
 [[package]]
-name = "pallet-sudo"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-elections-phragmen"
+version = "5.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-support",
  "frame-system",
+ "log",
  "parity-scale-codec",
+ "scale-info",
+ "sp-core",
  "sp-io",
+ "sp-npos-elections",
  "sp-runtime",
  "sp-std",
 ]
 
 [[package]]
-name = "pallet-timestamp"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-grandpa"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-benchmarking",
  "frame-support",
  "frame-system",
- "impl-trait-for-tuples",
  "log",
+ "pallet-authorship",
+ "pallet-session",
  "parity-scale-codec",
- "sp-inherents",
+ "scale-info",
+ "sp-application-crypto",
+ "sp-core",
+ "sp-finality-grandpa",
+ "sp-io",
  "sp-runtime",
+ "sp-session",
+ "sp-staking",
  "sp-std",
- "sp-timestamp",
 ]
 
 [[package]]
-name = "pallet-transaction-payment"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-identity"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
+ "enumflags2",
+ "frame-benchmarking",
  "frame-support",
  "frame-system",
  "parity-scale-codec",
- "serde",
- "smallvec 1.6.1",
- "sp-core",
+ "scale-info",
  "sp-io",
  "sp-runtime",
  "sp-std",
 ]
 
 [[package]]
-name = "pallet-transaction-payment-rpc"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-im-online"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "jsonrpc-core 15.1.0",
- "jsonrpc-core-client",
- "jsonrpc-derive",
- "pallet-transaction-payment-rpc-runtime-api",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-authorship",
  "parity-scale-codec",
- "sp-api",
- "sp-blockchain",
+ "scale-info",
+ "sp-application-crypto",
  "sp-core",
- "sp-rpc",
+ "sp-io",
  "sp-runtime",
+ "sp-staking",
+ "sp-std",
 ]
 
 [[package]]
-name = "pallet-transaction-payment-rpc-runtime-api"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "pallet-indices"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "pallet-transaction-payment",
+ "frame-support",
+ "frame-system",
  "parity-scale-codec",
- "sp-api",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-keyring",
  "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-bytes"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67"
+name = "pallet-membership"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+]
 
 [[package]]
-name = "parity-db"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e337f62db341435f0da05b8f6b97e984ef4ea5800510cd07c2d624688c40b47"
+name = "pallet-mmr"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "blake2-rfc",
- "crc32fast",
- "fs2",
- "hex",
- "libc",
- "log",
- "memmap2",
- "parking_lot 0.11.1",
- "rand 0.8.3",
+ "ckb-merkle-mountain-range",
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "pallet-mmr-primitives",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-multiaddr"
-version = "0.11.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58341485071825827b7f03cf7efd1cb21e6a709bea778fb50227fd45d2f361b4"
+name = "pallet-mmr-primitives"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "arrayref",
- "bs58",
- "byteorder",
- "data-encoding",
- "multihash",
- "percent-encoding 2.1.0",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
  "serde",
- "static_assertions",
- "unsigned-varint 0.7.0",
- "url 2.2.1",
+ "sp-api",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-scale-codec"
-version = "2.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0f518afaa5a47d0d6386229b0a6e01e86427291d643aa4cabb4992219f504f8"
+name = "pallet-mmr-rpc"
+version = "3.0.0"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "arrayvec 0.7.0",
- "bitvec",
- "byte-slice-cast",
- "parity-scale-codec-derive",
+ "jsonrpc-core 18.0.0",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "pallet-mmr-primitives",
+ "parity-scale-codec",
  "serde",
+ "sp-api",
+ "sp-blockchain",
+ "sp-core",
+ "sp-runtime",
 ]
 
 [[package]]
-name = "parity-scale-codec-derive"
-version = "2.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f44c5f94427bd0b5076e8f7e15ca3f60a4d8ac0077e4793884e6fdfd8915344e"
+name = "pallet-multisig"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "proc-macro-crate 0.1.5",
- "proc-macro2",
- "quote",
- "syn",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-send-wrapper"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f"
+name = "pallet-nicks"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+]
 
 [[package]]
-name = "parity-tokio-ipc"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e57fea504fea33f9fbb5f49f378359030e7e026a6ab849bb9e8f0787376f1bf"
+name = "pallet-offences"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "libc",
+ "frame-support",
+ "frame-system",
  "log",
- "mio-named-pipes",
- "miow 0.3.6",
- "rand 0.7.3",
- "tokio 0.1.22",
- "tokio-named-pipes",
- "tokio-uds",
- "winapi 0.3.9",
+ "pallet-balances",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-runtime",
+ "sp-staking",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-util-mem"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "664a8c6b8e62d8f9f2f937e391982eb433ab285b4cd9545b342441e04a906e42"
+name = "pallet-proxy"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "cfg-if 1.0.0",
- "hashbrown",
- "impl-trait-for-tuples",
- "parity-util-mem-derive",
- "parking_lot 0.11.1",
- "primitive-types",
- "smallvec 1.6.1",
- "winapi 0.3.9",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-util-mem-derive"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2"
+name = "pallet-randomness-collective-flip"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "proc-macro2",
- "syn",
- "synstructure",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "safe-mix",
+ "scale-info",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-wasm"
-version = "0.32.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac"
+name = "pallet-scheduler"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "byteorder",
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parity-wasm"
-version = "0.42.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92"
-
-[[package]]
-name = "parity-ws"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61"
+name = "pallet-session"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "byteorder",
- "bytes 0.4.12",
- "httparse",
+ "frame-support",
+ "frame-system",
+ "impl-trait-for-tuples",
  "log",
- "mio",
- "mio-extras",
- "rand 0.7.3",
- "sha-1 0.8.2",
- "slab",
- "url 2.2.1",
+ "pallet-timestamp",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-session",
+ "sp-staking",
+ "sp-std",
+ "sp-trie",
 ]
 
 [[package]]
-name = "parking"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
+name = "pallet-shift-session-manager"
+version = "0.1.0"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "pallet-session",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-runtime",
+ "sp-staking",
+ "sp-std",
+]
 
 [[package]]
-name = "parking_lot"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
+name = "pallet-staking"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "lock_api 0.3.4",
- "parking_lot_core 0.6.2",
- "rustc_version",
+ "frame-election-provider-support",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-authorship",
+ "pallet-session",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-application-crypto",
+ "sp-io",
+ "sp-runtime",
+ "sp-staking",
+ "sp-std",
 ]
 
 [[package]]
-name = "parking_lot"
-version = "0.10.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"
+name = "pallet-staking-reward-curve"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "lock_api 0.3.4",
- "parking_lot_core 0.7.2",
+ "proc-macro-crate 1.1.0",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
-name = "parking_lot"
-version = "0.11.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb"
+name = "pallet-sudo"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "instant",
- "lock_api 0.4.2",
- "parking_lot_core 0.8.3",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parking_lot_core"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
+name = "pallet-timestamp"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "cfg-if 0.1.10",
- "cloudabi",
- "libc",
- "redox_syscall 0.1.57",
- "rustc_version",
- "smallvec 0.6.14",
- "winapi 0.3.9",
+ "frame-benchmarking",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-inherents",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+ "sp-timestamp",
 ]
 
 [[package]]
-name = "parking_lot_core"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3"
+name = "pallet-tips"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "cfg-if 0.1.10",
- "cloudabi",
- "libc",
- "redox_syscall 0.1.57",
- "smallvec 1.6.1",
- "winapi 0.3.9",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-treasury",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "parking_lot_core"
-version = "0.8.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018"
+name = "pallet-transaction-payment"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "cfg-if 1.0.0",
- "instant",
- "libc",
- "redox_syscall 0.2.5",
- "smallvec 1.6.1",
- "winapi 0.3.9",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "smallvec",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "paste"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880"
+name = "pallet-transaction-payment-rpc"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "paste-impl",
- "proc-macro-hack",
+ "jsonrpc-core 18.0.0",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "parity-scale-codec",
+ "sp-api",
+ "sp-blockchain",
+ "sp-core",
+ "sp-rpc",
+ "sp-runtime",
 ]
 
 [[package]]
-name = "paste"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5d65c4d95931acda4498f675e332fcbdc9a06705cd07086c510e9b6009cd1c1"
+name = "pallet-transaction-payment-rpc-runtime-api"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "pallet-transaction-payment",
+ "parity-scale-codec",
+ "sp-api",
+ "sp-runtime",
+]
 
 [[package]]
-name = "paste-impl"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6"
+name = "pallet-treasury"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "proc-macro-hack",
+ "frame-support",
+ "frame-system",
+ "impl-trait-for-tuples",
+ "pallet-balances",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "pbkdf2"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9"
+name = "pallet-utility"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "byteorder",
- "crypto-mac 0.7.0",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
 ]
 
 [[package]]
-name = "pbkdf2"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd"
+name = "pallet-vesting"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "crypto-mac 0.8.0",
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "pallet-xcm"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+ "xcm",
+ "xcm-executor",
 ]
 
 [[package]]
-name = "pdqselect"
+name = "parachain-info"
 version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ec91767ecc0a0bbe558ce8c9da33c068066c57ecc8bb8477ef8c1ad3ef77c27"
+source = "git+https://github.com/paritytech/cumulus?branch=master#9379cd6c1863ea846ad6c6a8cbbc99848dd5d693"
+dependencies = [
+ "cumulus-primitives-core",
+ "frame-support",
+ "frame-system",
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+]
 
 [[package]]
-name = "peeking_take_while"
+name = "parity-bytes"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
+checksum = "16b56e3a2420138bdb970f84dfb9c774aea80fa0e7371549eedec0d80c209c67"
 
 [[package]]
-name = "percent-encoding"
-version = "1.0.1"
+name = "parity-db"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831"
+checksum = "91b679c6acc14fac74382942e2b73bea441686a33430b951ea03b5aeb6a7f254"
+dependencies = [
+ "blake2-rfc",
+ "crc32fast",
+ "fs2",
+ "hex",
+ "libc",
+ "log",
+ "lz4",
+ "memmap2",
+ "parking_lot 0.11.2",
+ "rand 0.8.4",
+ "snap",
+]
 
 [[package]]
-name = "percent-encoding"
-version = "2.1.0"
+name = "parity-scale-codec"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909"
+dependencies = [
+ "arrayvec 0.7.1",
+ "bitvec 0.20.4",
+ "byte-slice-cast",
+ "impl-trait-for-tuples",
+ "parity-scale-codec-derive",
+ "serde",
+]
 
 [[package]]
-name = "pest"
-version = "2.1.3"
+name = "parity-scale-codec-derive"
+version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53"
+checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27"
 dependencies = [
- "ucd-trie",
+ "proc-macro-crate 1.1.0",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
-name = "pest_derive"
-version = "2.1.0"
+name = "parity-send-wrapper"
+version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0"
+checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f"
+
+[[package]]
+name = "parity-tokio-ipc"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6"
 dependencies = [
- "pest",
- "pest_generator",
+ "futures 0.3.17",
+ "libc",
+ "log",
+ "rand 0.7.3",
+ "tokio",
+ "winapi 0.3.9",
 ]
 
 [[package]]
-name = "pest_generator"
-version = "2.1.3"
+name = "parity-util-mem"
+version = "0.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55"
+checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770"
+dependencies = [
+ "cfg-if 1.0.0",
+ "ethereum-types",
+ "hashbrown",
+ "impl-trait-for-tuples",
+ "lru 0.6.6",
+ "parity-util-mem-derive",
+ "parking_lot 0.11.2",
+ "primitive-types",
+ "smallvec",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "parity-util-mem-derive"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2"
 dependencies = [
- "pest",
- "pest_meta",
  "proc-macro2",
- "quote",
  "syn",
+ "synstructure",
 ]
 
 [[package]]
-name = "pest_meta"
-version = "2.1.3"
+name = "parity-wasm"
+version = "0.32.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d"
+checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac"
 dependencies = [
- "maplit",
- "pest",
- "sha-1 0.8.2",
+ "byteorder",
 ]
 
 [[package]]
-name = "petgraph"
-version = "0.5.1"
+name = "parity-wasm"
+version = "0.42.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7"
+checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92"
+
+[[package]]
+name = "parity-ws"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0ab8a461779bd022964cae2b4989fa9c99deb270bec162da2125ec03c09fcaa"
 dependencies = [
- "fixedbitset",
- "indexmap",
+ "byteorder",
+ "bytes 0.4.12",
+ "httparse",
+ "log",
+ "mio 0.6.23",
+ "mio-extras",
+ "rand 0.7.3",
+ "sha-1 0.8.2",
+ "slab",
+ "url 2.2.2",
+]
+
+[[package]]
+name = "parking"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72"
+
+[[package]]
+name = "parking_lot"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e"
+dependencies = [
+ "lock_api 0.3.4",
+ "parking_lot_core 0.7.2",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99"
+dependencies = [
+ "instant",
+ "lock_api 0.4.5",
+ "parking_lot_core 0.8.5",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3"
+dependencies = [
+ "cfg-if 0.1.10",
+ "cloudabi",
+ "libc",
+ "redox_syscall 0.1.57",
+ "smallvec",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.8.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216"
+dependencies = [
+ "cfg-if 1.0.0",
+ "instant",
+ "libc",
+ "redox_syscall 0.2.10",
+ "smallvec",
+ "winapi 0.3.9",
+]
+
+[[package]]
+name = "paste"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58"
+
+[[package]]
+name = "pbkdf2"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd"
+dependencies = [
+ "crypto-mac 0.8.0",
+]
+
+[[package]]
+name = "pbkdf2"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa"
+dependencies = [
+ "crypto-mac 0.11.1",
+]
+
+[[package]]
+name = "peeking_take_while"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
+
+[[package]]
+name = "percent-encoding"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831"
+
+[[package]]
+name = "percent-encoding"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
+
+[[package]]
+name = "pest"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53"
+dependencies = [
+ "ucd-trie",
+]
+
+[[package]]
+name = "pest_derive"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0"
+dependencies = [
+ "pest",
+ "pest_generator",
+]
+
+[[package]]
+name = "pest_generator"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55"
+dependencies = [
+ "pest",
+ "pest_meta",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pest_meta"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d"
+dependencies = [
+ "maplit",
+ "pest",
+ "sha-1 0.8.2",
+]
+
+[[package]]
+name = "petgraph"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7"
+dependencies = [
+ "fixedbitset",
+ "indexmap",
+]
+
+[[package]]
+name = "pin-project"
+version = "0.4.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f"
+dependencies = [
+ "pin-project-internal 0.4.28",
+]
+
+[[package]]
+name = "pin-project"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08"
+dependencies = [
+ "pin-project-internal 1.0.8",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "0.4.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pin-project-internal"
+version = "1.0.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "pin-project-lite"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
+
+[[package]]
+name = "pin-project-lite"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443"
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
+
+[[package]]
+name = "pkg-config"
+version = "0.3.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
+
+[[package]]
+name = "plain_hasher"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e19e6491bdde87c2c43d70f4c194bc8a758f2eb732df00f61e43f7362e3b4cc"
+dependencies = [
+ "crunchy",
+]
+
+[[package]]
+name = "platforms"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325"
+
+[[package]]
+name = "polkadot-approval-distribution"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-availability-bitfield-distribution"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "polkadot-node-network-protocol",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-availability-distribution"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "derive_more",
+ "futures 0.3.17",
+ "lru 0.7.0",
+ "parity-scale-codec",
+ "polkadot-erasure-coding",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "sp-core",
+ "sp-keystore",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-availability-recovery"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "lru 0.7.0",
+ "parity-scale-codec",
+ "polkadot-erasure-coding",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "sc-network",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-cli"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "frame-benchmarking-cli",
+ "futures 0.3.17",
+ "log",
+ "polkadot-node-core-pvf",
+ "polkadot-service",
+ "sc-cli",
+ "sc-service",
+ "sp-core",
+ "sp-trie",
+ "structopt",
+ "substrate-build-script-utils",
+ "thiserror",
+ "try-runtime-cli",
+]
+
+[[package]]
+name = "polkadot-client"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "beefy-primitives",
+ "frame-benchmarking",
+ "frame-system-rpc-runtime-api",
+ "pallet-mmr-primitives",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "polkadot-primitives",
+ "polkadot-runtime",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-executor",
+ "sc-service",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-babe",
+ "sp-finality-grandpa",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-storage",
+ "sp-transaction-pool",
+]
+
+[[package]]
+name = "polkadot-collator-protocol"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "always-assert",
+ "derive_more",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sp-core",
+ "sp-keystore",
+ "sp-runtime",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-core-primitives"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "parity-scale-codec",
+ "parity-util-mem",
+ "scale-info",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "polkadot-dispute-distribution"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "derive_more",
+ "futures 0.3.17",
+ "lru 0.7.0",
+ "parity-scale-codec",
+ "polkadot-erasure-coding",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sc-network",
+ "sp-application-crypto",
+ "sp-keystore",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-erasure-coding"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "parity-scale-codec",
+ "polkadot-node-primitives",
+ "polkadot-primitives",
+ "reed-solomon-novelpoly",
+ "sp-core",
+ "sp-trie",
+ "thiserror",
+]
+
+[[package]]
+name = "polkadot-gossip-support"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "polkadot-node-network-protocol",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "rand_chacha 0.3.1",
+ "sc-network",
+ "sp-application-crypto",
+ "sp-core",
+ "sp-keystore",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-network-bridge"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-trait",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "parking_lot 0.11.2",
+ "polkadot-node-network-protocol",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "sc-network",
+ "sp-consensus",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-collation-generation"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "polkadot-erasure-coding",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sp-core",
+ "sp-maybe-compressed-blob",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-approval-voting"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitvec 0.20.4",
+ "derive_more",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "kvdb",
+ "lru 0.7.0",
+ "merlin",
+ "parity-scale-codec",
+ "polkadot-node-jaeger",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "sc-keystore",
+ "schnorrkel",
+ "sp-application-crypto",
+ "sp-consensus",
+ "sp-consensus-slots",
+ "sp-runtime",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-av-store"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitvec 0.20.4",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "kvdb",
+ "parity-scale-codec",
+ "polkadot-erasure-coding",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-backing"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitvec 0.20.4",
+ "futures 0.3.17",
+ "polkadot-erasure-coding",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "polkadot-statement-table",
+ "sp-keystore",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-bitfield-signing"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sp-keystore",
+ "thiserror",
+ "tracing",
+ "wasm-timer",
+]
+
+[[package]]
+name = "polkadot-node-core-candidate-validation"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-trait",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "polkadot-node-core-pvf",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "sp-maybe-compressed-blob",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-chain-api"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sc-client-api",
+ "sc-consensus-babe",
+ "sp-blockchain",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-chain-selection"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "kvdb",
+ "parity-scale-codec",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-dispute-coordinator"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitvec 0.20.4",
+ "derive_more",
+ "futures 0.3.17",
+ "kvdb",
+ "parity-scale-codec",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sc-keystore",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-dispute-participation"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-primitives",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-parachains-inherent"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-trait",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "polkadot-node-subsystem",
+ "polkadot-primitives",
+ "sp-blockchain",
+ "sp-inherents",
+ "sp-runtime",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-provisioner"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitvec 0.20.4",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-pvf"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "always-assert",
+ "assert_matches",
+ "async-process",
+ "async-std",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "libc",
+ "parity-scale-codec",
+ "pin-project 1.0.8",
+ "polkadot-core-primitives",
+ "polkadot-node-subsystem-util",
+ "polkadot-parachain",
+ "rand 0.8.4",
+ "sc-executor",
+ "sc-executor-common",
+ "sc-executor-wasmtime",
+ "slotmap",
+ "sp-core",
+ "sp-externalities",
+ "sp-io",
+ "sp-maybe-compressed-blob",
+ "sp-tracing",
+ "sp-wasm-interface",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-core-runtime-api"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "memory-lru",
+ "parity-util-mem",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-consensus-babe",
+ "sp-core",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-node-jaeger"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-std",
+ "lazy_static",
+ "log",
+ "mick-jaeger",
+ "parity-scale-codec",
+ "parking_lot 0.11.2",
+ "polkadot-node-primitives",
+ "polkadot-primitives",
+ "sc-network",
+ "sp-core",
+ "thiserror",
+]
+
+[[package]]
+name = "polkadot-node-metrics"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "metered-channel",
+ "substrate-prometheus-endpoint",
+]
+
+[[package]]
+name = "polkadot-node-network-protocol"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-trait",
+ "derive_more",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "polkadot-node-jaeger",
+ "polkadot-node-primitives",
+ "polkadot-primitives",
+ "sc-authority-discovery",
+ "sc-network",
+ "strum 0.21.0",
+ "thiserror",
+]
+
+[[package]]
+name = "polkadot-node-primitives"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bounded-vec",
+ "futures 0.3.17",
+ "parity-scale-codec",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "schnorrkel",
+ "serde",
+ "sp-application-crypto",
+ "sp-consensus-babe",
+ "sp-consensus-vrf",
+ "sp-core",
+ "sp-keystore",
+ "sp-maybe-compressed-blob",
+ "thiserror",
+ "zstd",
+]
+
+[[package]]
+name = "polkadot-node-subsystem"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "polkadot-node-jaeger",
+ "polkadot-node-subsystem-types",
+ "polkadot-overseer",
+]
+
+[[package]]
+name = "polkadot-node-subsystem-types"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "derive_more",
+ "futures 0.3.17",
+ "polkadot-node-jaeger",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-overseer-gen",
+ "polkadot-primitives",
+ "polkadot-statement-table",
+ "sc-network",
+ "smallvec",
+ "substrate-prometheus-endpoint",
+ "thiserror",
+]
+
+[[package]]
+name = "polkadot-node-subsystem-util"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-trait",
+ "derive_more",
+ "futures 0.3.17",
+ "itertools",
+ "lru 0.7.0",
+ "metered-channel",
+ "parity-scale-codec",
+ "pin-project 1.0.8",
+ "polkadot-node-jaeger",
+ "polkadot-node-metrics",
+ "polkadot-node-network-protocol",
+ "polkadot-node-subsystem",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "sp-application-crypto",
+ "sp-core",
+ "sp-keystore",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-overseer"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "lru 0.7.0",
+ "parity-util-mem",
+ "parking_lot 0.11.2",
+ "polkadot-node-metrics",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem-types",
+ "polkadot-overseer-gen",
+ "polkadot-primitives",
+ "sc-client-api",
+ "sp-api",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-overseer-gen"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "async-trait",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "metered-channel",
+ "pin-project 1.0.8",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-overseer-gen-proc-macro",
+ "thiserror",
+ "tracing",
+]
+
+[[package]]
+name = "polkadot-overseer-gen-proc-macro"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "proc-macro-crate 1.1.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "polkadot-parachain"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "derive_more",
+ "frame-support",
+ "parity-scale-codec",
+ "parity-util-mem",
+ "polkadot-core-primitives",
+ "scale-info",
+ "serde",
+ "sp-core",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "polkadot-primitives"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitvec 0.20.4",
+ "frame-system",
+ "hex-literal 0.3.3",
+ "parity-scale-codec",
+ "parity-util-mem",
+ "polkadot-core-primitives",
+ "polkadot-parachain",
+ "scale-info",
+ "serde",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-arithmetic",
+ "sp-authority-discovery",
+ "sp-consensus-slots",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-keystore",
+ "sp-runtime",
+ "sp-staking",
+ "sp-std",
+ "sp-trie",
+ "sp-version",
+]
+
+[[package]]
+name = "polkadot-rpc"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "beefy-gadget",
+ "beefy-gadget-rpc",
+ "jsonrpc-core 18.0.0",
+ "pallet-mmr-rpc",
+ "pallet-transaction-payment-rpc",
+ "polkadot-primitives",
+ "sc-chain-spec",
+ "sc-client-api",
+ "sc-consensus-babe",
+ "sc-consensus-babe-rpc",
+ "sc-consensus-epochs",
+ "sc-finality-grandpa",
+ "sc-finality-grandpa-rpc",
+ "sc-rpc",
+ "sc-sync-state-rpc",
+ "sc-transaction-pool-api",
+ "sp-api",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-babe",
+ "sp-keystore",
+ "sp-runtime",
+ "substrate-frame-rpc-system",
+]
+
+[[package]]
+name = "polkadot-runtime"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "beefy-primitives",
+ "bitvec 0.20.4",
+ "frame-election-provider-support",
+ "frame-executive",
+ "frame-support",
+ "frame-system",
+ "frame-system-rpc-runtime-api",
+ "frame-try-runtime",
+ "log",
+ "pallet-authority-discovery",
+ "pallet-authorship",
+ "pallet-babe",
+ "pallet-balances",
+ "pallet-bounties",
+ "pallet-collective",
+ "pallet-democracy",
+ "pallet-election-provider-multi-phase",
+ "pallet-elections-phragmen",
+ "pallet-grandpa",
+ "pallet-identity",
+ "pallet-im-online",
+ "pallet-indices",
+ "pallet-membership",
+ "pallet-mmr-primitives",
+ "pallet-multisig",
+ "pallet-nicks",
+ "pallet-offences",
+ "pallet-proxy",
+ "pallet-scheduler",
+ "pallet-session",
+ "pallet-staking",
+ "pallet-staking-reward-curve",
+ "pallet-timestamp",
+ "pallet-tips",
+ "pallet-transaction-payment",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "pallet-treasury",
+ "pallet-utility",
+ "pallet-vesting",
+ "parity-scale-codec",
+ "polkadot-primitives",
+ "polkadot-runtime-common",
+ "polkadot-runtime-parachains",
+ "rustc-hex",
+ "scale-info",
+ "serde",
+ "serde_derive",
+ "smallvec",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-block-builder",
+ "sp-consensus-babe",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-npos-elections",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-staking",
+ "sp-std",
+ "sp-transaction-pool",
+ "sp-version",
+ "static_assertions",
+ "substrate-wasm-builder",
+]
+
+[[package]]
+name = "polkadot-runtime-common"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "beefy-primitives",
+ "bitvec 0.20.4",
+ "frame-election-provider-support",
+ "frame-support",
+ "frame-system",
+ "impl-trait-for-tuples",
+ "libsecp256k1 0.7.0",
+ "log",
+ "pallet-authorship",
+ "pallet-bags-list",
+ "pallet-balances",
+ "pallet-beefy-mmr",
+ "pallet-election-provider-multi-phase",
+ "pallet-session",
+ "pallet-staking",
+ "pallet-timestamp",
+ "pallet-transaction-payment",
+ "pallet-treasury",
+ "pallet-vesting",
+ "parity-scale-codec",
+ "polkadot-primitives",
+ "polkadot-runtime-parachains",
+ "rustc-hex",
+ "scale-info",
+ "serde",
+ "serde_derive",
+ "slot-range-helper",
+ "sp-api",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-npos-elections",
+ "sp-runtime",
+ "sp-session",
+ "sp-staking",
+ "sp-std",
+ "static_assertions",
+ "xcm",
+]
+
+[[package]]
+name = "polkadot-runtime-parachains"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "bitflags",
+ "bitvec 0.20.4",
+ "derive_more",
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-authority-discovery",
+ "pallet-authorship",
+ "pallet-balances",
+ "pallet-session",
+ "pallet-staking",
+ "pallet-timestamp",
+ "pallet-vesting",
+ "parity-scale-codec",
+ "polkadot-primitives",
+ "rand 0.8.4",
+ "rand_chacha 0.3.1",
+ "rustc-hex",
+ "scale-info",
+ "serde",
+ "sp-api",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-keystore",
+ "sp-runtime",
+ "sp-session",
+ "sp-staking",
+ "sp-std",
+ "xcm",
+ "xcm-executor",
 ]
 
 [[package]]
-name = "pin-project"
-version = "0.4.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2ffbc8e94b38ea3d2d8ba92aea2983b503cd75d0888d75b86bb37970b5698e15"
+name = "polkadot-service"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
 dependencies = [
- "pin-project-internal 0.4.27",
+ "async-trait",
+ "beefy-gadget",
+ "beefy-primitives",
+ "frame-system-rpc-runtime-api",
+ "futures 0.3.17",
+ "hex-literal 0.3.3",
+ "kvdb",
+ "kvdb-rocksdb 0.14.0",
+ "lru 0.7.0",
+ "pallet-babe",
+ "pallet-im-online",
+ "pallet-mmr-primitives",
+ "pallet-staking",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "polkadot-approval-distribution",
+ "polkadot-availability-bitfield-distribution",
+ "polkadot-availability-distribution",
+ "polkadot-availability-recovery",
+ "polkadot-client",
+ "polkadot-collator-protocol",
+ "polkadot-dispute-distribution",
+ "polkadot-gossip-support",
+ "polkadot-network-bridge",
+ "polkadot-node-collation-generation",
+ "polkadot-node-core-approval-voting",
+ "polkadot-node-core-av-store",
+ "polkadot-node-core-backing",
+ "polkadot-node-core-bitfield-signing",
+ "polkadot-node-core-candidate-validation",
+ "polkadot-node-core-chain-api",
+ "polkadot-node-core-chain-selection",
+ "polkadot-node-core-dispute-coordinator",
+ "polkadot-node-core-dispute-participation",
+ "polkadot-node-core-parachains-inherent",
+ "polkadot-node-core-provisioner",
+ "polkadot-node-core-runtime-api",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-overseer",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "polkadot-rpc",
+ "polkadot-runtime",
+ "polkadot-runtime-parachains",
+ "polkadot-statement-distribution",
+ "sc-authority-discovery",
+ "sc-basic-authorship",
+ "sc-block-builder",
+ "sc-chain-spec",
+ "sc-client-api",
+ "sc-client-db",
+ "sc-consensus",
+ "sc-consensus-babe",
+ "sc-consensus-slots",
+ "sc-consensus-uncles",
+ "sc-executor",
+ "sc-finality-grandpa",
+ "sc-keystore",
+ "sc-network",
+ "sc-service",
+ "sc-sync-state-rpc",
+ "sc-telemetry",
+ "sc-transaction-pool",
+ "serde",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-babe",
+ "sp-core",
+ "sp-finality-grandpa",
+ "sp-inherents",
+ "sp-io",
+ "sp-keystore",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-state-machine",
+ "sp-storage",
+ "sp-timestamp",
+ "sp-transaction-pool",
+ "sp-trie",
+ "substrate-prometheus-endpoint",
+ "thiserror",
+ "tracing",
 ]
 
 [[package]]
-name = "pin-project"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63"
+name = "polkadot-statement-distribution"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
 dependencies = [
- "pin-project-internal 1.0.5",
+ "arrayvec 0.5.2",
+ "derive_more",
+ "futures 0.3.17",
+ "indexmap",
+ "parity-scale-codec",
+ "polkadot-node-network-protocol",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-node-subsystem-util",
+ "polkadot-primitives",
+ "sp-keystore",
+ "sp-staking",
+ "thiserror",
+ "tracing",
 ]
 
 [[package]]
-name = "pin-project-internal"
-version = "0.4.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "65ad2ae56b6abe3a1ee25f15ee605bacadb9a764edaba9c2bf4103800d4a1895"
+name = "polkadot-statement-table"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
 dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "parity-scale-codec",
+ "polkadot-primitives",
+ "sp-core",
 ]
 
 [[package]]
-name = "pin-project-internal"
-version = "1.0.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b"
+name = "polkadot-test-runtime"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
 dependencies = [
- "proc-macro2",
- "quote",
- "syn",
+ "beefy-primitives",
+ "bitvec 0.20.4",
+ "frame-election-provider-support",
+ "frame-executive",
+ "frame-support",
+ "frame-system",
+ "frame-system-rpc-runtime-api",
+ "log",
+ "pallet-authority-discovery",
+ "pallet-authorship",
+ "pallet-babe",
+ "pallet-balances",
+ "pallet-grandpa",
+ "pallet-indices",
+ "pallet-mmr-primitives",
+ "pallet-nicks",
+ "pallet-offences",
+ "pallet-session",
+ "pallet-staking",
+ "pallet-staking-reward-curve",
+ "pallet-sudo",
+ "pallet-timestamp",
+ "pallet-transaction-payment",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "pallet-vesting",
+ "pallet-xcm",
+ "parity-scale-codec",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "polkadot-runtime-common",
+ "polkadot-runtime-parachains",
+ "rustc-hex",
+ "scale-info",
+ "serde",
+ "serde_derive",
+ "smallvec",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-block-builder",
+ "sp-consensus-babe",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-staking",
+ "sp-std",
+ "sp-transaction-pool",
+ "sp-version",
+ "substrate-wasm-builder",
+ "xcm",
+ "xcm-builder",
+ "xcm-executor",
 ]
 
 [[package]]
-name = "pin-project-lite"
-version = "0.1.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c917123afa01924fc84bb20c4c03f004d9c38e5127e3c039bbf7f4b9c76a2f6b"
-
-[[package]]
-name = "pin-project-lite"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827"
-
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
-[[package]]
-name = "pkg-config"
-version = "0.3.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c"
-
-[[package]]
-name = "plain_hasher"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e19e6491bdde87c2c43d70f4c194bc8a758f2eb732df00f61e43f7362e3b4cc"
+name = "polkadot-test-service"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
 dependencies = [
- "crunchy",
+ "frame-benchmarking",
+ "frame-system",
+ "futures 0.1.31",
+ "futures 0.3.17",
+ "hex",
+ "pallet-balances",
+ "pallet-staking",
+ "pallet-transaction-payment",
+ "polkadot-node-primitives",
+ "polkadot-node-subsystem",
+ "polkadot-overseer",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "polkadot-rpc",
+ "polkadot-runtime-common",
+ "polkadot-runtime-parachains",
+ "polkadot-service",
+ "polkadot-test-runtime",
+ "rand 0.8.4",
+ "sc-authority-discovery",
+ "sc-chain-spec",
+ "sc-cli",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-consensus-babe",
+ "sc-executor",
+ "sc-finality-grandpa",
+ "sc-network",
+ "sc-service",
+ "sc-tracing",
+ "sc-transaction-pool",
+ "sp-arithmetic",
+ "sp-authority-discovery",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-babe",
+ "sp-core",
+ "sp-finality-grandpa",
+ "sp-inherents",
+ "sp-keyring",
+ "sp-runtime",
+ "sp-state-machine",
+ "substrate-test-client",
+ "tempfile",
+ "tokio",
+ "tracing",
 ]
 
-[[package]]
-name = "platforms"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325"
-
 [[package]]
 name = "polling"
-version = "2.0.2"
+version = "2.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4"
+checksum = "92341d779fa34ea8437ef4d82d440d5e1ce3f3ff7f824aa64424cd481f9a1f25"
 dependencies = [
- "cfg-if 0.1.10",
+ "cfg-if 1.0.0",
  "libc",
  "log",
- "wepoll-sys",
+ "wepoll-ffi",
  "winapi 0.3.9",
 ]
 
 [[package]]
 name = "poly1305"
-version = "0.6.2"
+version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b7456bc1ad2d4cf82b3a016be4c2ac48daf11bf990c1603ebd447fe6f30fca8"
+checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede"
 dependencies = [
- "cpuid-bool 0.2.0",
+ "cpufeatures 0.2.1",
+ "opaque-debug 0.3.0",
  "universal-hash",
 ]
 
 [[package]]
 name = "polyval"
-version = "0.4.5"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eebcc4aa140b9abd2bc40d9c3f7ccec842679cd79045ac3a7ac698c1a064b7cd"
+checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1"
 dependencies = [
- "cpuid-bool 0.2.0",
+ "cfg-if 1.0.0",
+ "cpufeatures 0.2.1",
  "opaque-debug 0.3.0",
  "universal-hash",
 ]
@@ -5412,14 +7705,15 @@ checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
 
 [[package]]
 name = "primitive-types"
-version = "0.9.0"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2415937401cb030a2a0a4d922483f945fa068f52a7dbb22ce0fe5f2b6f6adace"
+checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373"
 dependencies = [
  "fixed-hash",
  "impl-codec",
  "impl-rlp",
  "impl-serde",
+ "scale-info",
  "uint",
 ]
 
@@ -5434,24 +7728,50 @@ dependencies = [
 
 [[package]]
 name = "proc-macro-crate"
-version = "1.0.0"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92"
+checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83"
 dependencies = [
  "thiserror",
  "toml",
 ]
 
+[[package]]
+name = "proc-macro-error"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7"
+dependencies = [
+ "proc-macro-error-attr 0.4.12",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
 [[package]]
 name = "proc-macro-error"
 version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
 dependencies = [
- "proc-macro-error-attr",
+ "proc-macro-error-attr 1.0.4",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de"
+dependencies = [
  "proc-macro2",
  "quote",
  "syn",
+ "syn-mid",
  "version_check",
 ]
 
@@ -5480,63 +7800,63 @@ checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.26"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec"
+checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d"
 dependencies = [
  "unicode-xid",
 ]
 
 [[package]]
 name = "prometheus"
-version = "0.11.0"
+version = "0.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d"
+checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c"
 dependencies = [
  "cfg-if 1.0.0",
  "fnv",
  "lazy_static",
- "parking_lot 0.11.1",
- "regex",
+ "memchr",
+ "parking_lot 0.11.2",
  "thiserror",
 ]
 
 [[package]]
 name = "prost"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2"
+checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "prost-derive",
 ]
 
 [[package]]
 name = "prost-build"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3"
+checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "heck",
- "itertools 0.9.0",
+ "itertools",
  "log",
  "multimap",
  "petgraph",
  "prost",
  "prost-types",
  "tempfile",
- "which 4.0.2",
+ "which",
 ]
 
 [[package]]
 name = "prost-derive"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4"
+checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba"
 dependencies = [
  "anyhow",
- "itertools 0.9.0",
+ "itertools",
  "proc-macro2",
  "quote",
  "syn",
@@ -5544,28 +7864,28 @@ dependencies = [
 
 [[package]]
 name = "prost-types"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb"
+checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "prost",
 ]
 
 [[package]]
 name = "psm"
-version = "0.1.12"
+version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3abf49e5417290756acfd26501536358560c4a5cc4a0934d390939acb3e7083a"
+checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69"
 dependencies = [
  "cc",
 ]
 
 [[package]]
 name = "pwasm-utils"
-version = "0.18.0"
+version = "0.18.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0e517f47d9964362883182404b68d0b6949382c0baa40aa5ffca94f5f1e3481"
+checksum = "880b3384fb00b8f6ecccd5d358b93bd2201900ae3daad213791d1864f6441f5c"
 dependencies = [
  "byteorder",
  "log",
@@ -5580,9 +7900,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
 
 [[package]]
 name = "quick-error"
-version = "2.0.0"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ac73b1112776fc109b2e61909bc46c7e1bf0d7f690ffb1676553acce16d5cda"
+checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3"
 
 [[package]]
 name = "quicksink"
@@ -5592,46 +7912,29 @@ checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858"
 dependencies = [
  "futures-core",
  "futures-sink",
- "pin-project-lite 0.1.11",
+ "pin-project-lite 0.1.12",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.9"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
+checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05"
 dependencies = [
  "proc-macro2",
 ]
 
 [[package]]
 name = "radium"
-version = "0.6.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb"
-
-[[package]]
-name = "rand"
-version = "0.3.23"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c"
-dependencies = [
- "libc",
- "rand 0.4.6",
-]
+checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8"
 
 [[package]]
-name = "rand"
-version = "0.4.6"
+name = "radium"
+version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293"
-dependencies = [
- "fuchsia-cprng",
- "libc",
- "rand_core 0.3.1",
- "rdrand",
- "winapi 0.3.9",
-]
+checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb"
 
 [[package]]
 name = "rand"
@@ -5649,51 +7952,36 @@ dependencies = [
 
 [[package]]
 name = "rand"
-version = "0.8.3"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
 dependencies = [
  "libc",
- "rand_chacha 0.3.0",
- "rand_core 0.6.2",
- "rand_hc 0.3.0",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
-dependencies = [
- "ppv-lite86",
- "rand_core 0.5.1",
+ "rand_chacha 0.3.1",
+ "rand_core 0.6.3",
+ "rand_hc 0.3.1",
 ]
 
 [[package]]
 name = "rand_chacha"
-version = "0.3.0"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d"
+checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
 dependencies = [
  "ppv-lite86",
- "rand_core 0.6.2",
+ "rand_core 0.5.1",
 ]
 
 [[package]]
-name = "rand_core"
+name = "rand_chacha"
 version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
 dependencies = [
- "rand_core 0.4.2",
+ "ppv-lite86",
+ "rand_core 0.6.3",
 ]
 
-[[package]]
-name = "rand_core"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc"
-
 [[package]]
 name = "rand_core"
 version = "0.5.1"
@@ -5705,20 +7993,21 @@ dependencies = [
 
 [[package]]
 name = "rand_core"
-version = "0.6.2"
+version = "0.6.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
 dependencies = [
- "getrandom 0.2.2",
+ "getrandom 0.2.3",
 ]
 
 [[package]]
 name = "rand_distr"
-version = "0.2.2"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2"
+checksum = "051b398806e42b9cd04ad9ec8f81e355d0a382c543ac6672c62f5a5b452ef142"
 dependencies = [
- "rand 0.7.3",
+ "num-traits",
+ "rand 0.8.4",
 ]
 
 [[package]]
@@ -5732,11 +8021,11 @@ dependencies = [
 
 [[package]]
 name = "rand_hc"
-version = "0.3.0"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
 dependencies = [
- "rand_core 0.6.2",
+ "rand_core 0.6.3",
 ]
 
 [[package]]
@@ -5756,38 +8045,29 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3"
 
 [[package]]
 name = "rayon"
-version = "1.5.0"
+version = "1.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b0d8e0819fadc20c74ea8373106ead0600e3a67ef1fe8da56e39b9ae7275674"
+checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
 dependencies = [
  "autocfg",
- "crossbeam-deque 0.8.0",
+ "crossbeam-deque",
  "either",
  "rayon-core",
 ]
 
 [[package]]
 name = "rayon-core"
-version = "1.9.0"
+version = "1.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a"
+checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
 dependencies = [
  "crossbeam-channel",
- "crossbeam-deque 0.8.0",
- "crossbeam-utils 0.8.3",
+ "crossbeam-deque",
+ "crossbeam-utils",
  "lazy_static",
  "num_cpus",
 ]
 
-[[package]]
-name = "rdrand"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
-dependencies = [
- "rand_core 0.3.1",
-]
-
 [[package]]
 name = "redox_syscall"
 version = "0.1.57"
@@ -5796,32 +8076,34 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
 
 [[package]]
 name = "redox_syscall"
-version = "0.2.5"
+version = "0.2.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9"
+checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
 dependencies = [
  "bitflags",
 ]
 
 [[package]]
 name = "redox_users"
-version = "0.3.5"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d"
+checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64"
 dependencies = [
- "getrandom 0.1.16",
- "redox_syscall 0.1.57",
- "rust-argon2",
+ "getrandom 0.2.3",
+ "redox_syscall 0.2.10",
 ]
 
 [[package]]
-name = "redox_users"
-version = "0.4.0"
+name = "reed-solomon-novelpoly"
+version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64"
+checksum = "3bd8f48b2066e9f69ab192797d66da804d1935bf22763204ed3675740cb0f221"
 dependencies = [
- "getrandom 0.2.2",
- "redox_syscall 0.2.5",
+ "derive_more",
+ "fs-err",
+ "itertools",
+ "static_init",
+ "thiserror",
 ]
 
 [[package]]
@@ -5853,36 +8135,34 @@ dependencies = [
  "log",
  "rustc-hash",
  "serde",
- "smallvec 1.6.1",
+ "smallvec",
 ]
 
 [[package]]
 name = "regex"
-version = "1.4.3"
+version = "1.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a"
+checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
 dependencies = [
  "aho-corasick",
  "memchr",
  "regex-syntax",
- "thread_local",
 ]
 
 [[package]]
 name = "regex-automata"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
+checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132"
 dependencies = [
- "byteorder",
  "regex-syntax",
 ]
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.22"
+version = "0.6.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
 
 [[package]]
 name = "region"
@@ -5903,13 +8183,14 @@ dependencies = [
  "async-std",
  "bp-eth-poa",
  "headers-relay",
- "hex-literal 0.3.1",
+ "hex-literal 0.3.3",
  "jsonrpsee-proc-macros",
  "jsonrpsee-ws-client",
- "libsecp256k1",
+ "libsecp256k1 0.7.0",
  "log",
- "parity-scale-codec",
  "relay-utils",
+ "thiserror",
+ "tokio",
  "web3",
 ]
 
@@ -5917,16 +8198,21 @@ dependencies = [
 name = "relay-kusama-client"
 version = "0.1.0"
 dependencies = [
+ "bp-header-chain",
  "bp-kusama",
+ "bp-message-dispatch",
+ "bp-messages",
+ "bp-polkadot",
+ "bp-polkadot-core",
+ "bp-runtime",
+ "bridge-runtime-common",
  "frame-support",
- "frame-system",
- "headers-relay",
- "pallet-transaction-payment",
+ "pallet-bridge-dispatch",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
+ "scale-info",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
@@ -5934,16 +8220,15 @@ dependencies = [
 name = "relay-millau-client"
 version = "0.1.0"
 dependencies = [
+ "bp-millau",
  "frame-support",
  "frame-system",
- "headers-relay",
  "millau-runtime",
  "pallet-transaction-payment",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
@@ -5951,16 +8236,21 @@ dependencies = [
 name = "relay-polkadot-client"
 version = "0.1.0"
 dependencies = [
+ "bp-header-chain",
+ "bp-kusama",
+ "bp-message-dispatch",
+ "bp-messages",
  "bp-polkadot",
+ "bp-polkadot-core",
+ "bp-runtime",
+ "bridge-runtime-common",
  "frame-support",
- "frame-system",
- "headers-relay",
- "pallet-transaction-payment",
+ "pallet-bridge-dispatch",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
+ "scale-info",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
@@ -5968,19 +8258,31 @@ dependencies = [
 name = "relay-rialto-client"
 version = "0.1.0"
 dependencies = [
+ "bp-rialto",
  "frame-support",
  "frame-system",
- "headers-relay",
  "pallet-transaction-payment",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
  "rialto-runtime",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
+[[package]]
+name = "relay-rialto-parachain-client"
+version = "0.1.0"
+dependencies = [
+ "bp-rialto",
+ "frame-support",
+ "frame-system",
+ "pallet-transaction-payment",
+ "relay-substrate-client",
+ "relay-utils",
+ "rialto-parachain-runtime",
+]
+
 [[package]]
 name = "relay-rococo-client"
 version = "0.1.0"
@@ -5994,16 +8296,13 @@ dependencies = [
  "bp-wococo",
  "bridge-runtime-common",
  "frame-support",
- "frame-system",
- "headers-relay",
  "pallet-bridge-dispatch",
  "pallet-bridge-messages",
- "pallet-transaction-payment",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
+ "scale-info",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
@@ -6014,29 +8313,33 @@ dependencies = [
  "async-std",
  "async-trait",
  "bp-header-chain",
- "bp-messages",
  "bp-runtime",
  "finality-relay",
  "frame-support",
  "frame-system",
- "futures 0.3.13",
+ "futures 0.3.17",
  "headers-relay",
  "jsonrpsee-proc-macros",
  "jsonrpsee-ws-client",
  "log",
  "num-traits",
  "pallet-balances",
+ "pallet-transaction-payment",
+ "pallet-transaction-payment-rpc-runtime-api",
  "parity-scale-codec",
  "rand 0.7.3",
  "relay-utils",
  "sc-rpc-api",
+ "sc-transaction-pool-api",
  "sp-core",
  "sp-finality-grandpa",
+ "sp-rpc",
  "sp-runtime",
- "sp-std",
  "sp-storage",
  "sp-trie",
  "sp-version",
+ "thiserror",
+ "tokio",
 ]
 
 [[package]]
@@ -6044,11 +8347,13 @@ name = "relay-utils"
 version = "0.1.0"
 dependencies = [
  "ansi_term 0.12.1",
+ "anyhow",
  "async-std",
  "async-trait",
  "backoff",
- "env_logger 0.8.3",
- "futures 0.3.13",
+ "bp-runtime",
+ "env_logger 0.8.4",
+ "futures 0.3.17",
  "isahc",
  "jsonpath_lib",
  "log",
@@ -6056,7 +8361,8 @@ dependencies = [
  "serde_json",
  "substrate-prometheus-endpoint",
  "sysinfo",
- "time 0.2.25",
+ "thiserror",
+ "time 0.2.27",
 ]
 
 [[package]]
@@ -6064,15 +8370,10 @@ name = "relay-westend-client"
 version = "0.1.0"
 dependencies = [
  "bp-westend",
- "frame-support",
- "frame-system",
- "headers-relay",
- "pallet-transaction-payment",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
@@ -6089,19 +8390,34 @@ dependencies = [
  "bp-wococo",
  "bridge-runtime-common",
  "frame-support",
- "frame-system",
- "headers-relay",
  "pallet-bridge-dispatch",
  "pallet-bridge-messages",
- "pallet-transaction-payment",
  "parity-scale-codec",
  "relay-substrate-client",
  "relay-utils",
+ "scale-info",
  "sp-core",
- "sp-keyring",
  "sp-runtime",
 ]
 
+[[package]]
+name = "remote-externalities"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "env_logger 0.9.0",
+ "jsonrpsee-proc-macros",
+ "jsonrpsee-ws-client",
+ "log",
+ "parity-scale-codec",
+ "serde",
+ "serde_json",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-version",
+]
+
 [[package]]
 name = "remove_dir_all"
 version = "0.5.3"
@@ -6111,6 +8427,41 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
+[[package]]
+name = "reqwest"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22"
+dependencies = [
+ "base64 0.13.0",
+ "bytes 1.1.0",
+ "encoding_rs",
+ "futures-core",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "hyper-tls",
+ "ipnet",
+ "js-sys",
+ "lazy_static",
+ "log",
+ "mime",
+ "native-tls",
+ "percent-encoding 2.1.0",
+ "pin-project-lite 0.2.7",
+ "serde",
+ "serde_json",
+ "serde_urlencoded",
+ "tokio",
+ "tokio-native-tls",
+ "url 2.2.2",
+ "wasm-bindgen",
+ "wasm-bindgen-futures",
+ "web-sys",
+ "winreg 0.7.0",
+]
+
 [[package]]
 name = "resolv-conf"
 version = "0.7.0"
@@ -6131,40 +8482,198 @@ checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b"
 name = "rialto-bridge-node"
 version = "0.1.0"
 dependencies = [
- "bp-messages",
  "bp-rialto",
  "bp-runtime",
  "frame-benchmarking",
  "frame-benchmarking-cli",
- "jsonrpc-core 15.1.0",
+ "frame-system-rpc-runtime-api",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
+ "kvdb",
+ "kvdb-rocksdb 0.12.1",
+ "lru 0.7.0",
  "node-inspect",
  "pallet-bridge-messages",
  "pallet-transaction-payment-rpc",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "polkadot-approval-distribution",
+ "polkadot-availability-bitfield-distribution",
+ "polkadot-availability-distribution",
+ "polkadot-availability-recovery",
+ "polkadot-collator-protocol",
+ "polkadot-dispute-distribution",
+ "polkadot-gossip-support",
+ "polkadot-network-bridge",
+ "polkadot-node-collation-generation",
+ "polkadot-node-core-approval-voting",
+ "polkadot-node-core-av-store",
+ "polkadot-node-core-backing",
+ "polkadot-node-core-bitfield-signing",
+ "polkadot-node-core-candidate-validation",
+ "polkadot-node-core-chain-api",
+ "polkadot-node-core-chain-selection",
+ "polkadot-node-core-dispute-coordinator",
+ "polkadot-node-core-dispute-participation",
+ "polkadot-node-core-parachains-inherent",
+ "polkadot-node-core-provisioner",
+ "polkadot-node-core-pvf",
+ "polkadot-node-core-runtime-api",
+ "polkadot-node-network-protocol",
+ "polkadot-node-subsystem-util",
+ "polkadot-overseer",
+ "polkadot-primitives",
+ "polkadot-runtime-parachains",
+ "polkadot-statement-distribution",
  "rialto-runtime",
+ "sc-authority-discovery",
  "sc-basic-authorship",
  "sc-cli",
  "sc-client-api",
  "sc-consensus",
- "sc-consensus-aura",
+ "sc-consensus-babe",
+ "sc-consensus-slots",
+ "sc-consensus-uncles",
  "sc-executor",
  "sc-finality-grandpa",
  "sc-finality-grandpa-rpc",
  "sc-keystore",
+ "sc-network",
  "sc-rpc",
  "sc-service",
  "sc-telemetry",
  "sc-transaction-pool",
  "serde_json",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-authorship",
+ "sp-block-builder",
+ "sp-blockchain",
  "sp-consensus",
- "sp-consensus-aura",
+ "sp-consensus-babe",
  "sp-core",
  "sp-finality-grandpa",
  "sp-inherents",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-timestamp",
+ "sp-transaction-pool",
+ "structopt",
+ "substrate-build-script-utils",
+ "substrate-frame-rpc-system",
+ "substrate-prometheus-endpoint",
+ "thiserror",
+]
+
+[[package]]
+name = "rialto-parachain-collator"
+version = "0.1.0"
+dependencies = [
+ "cumulus-client-cli",
+ "cumulus-client-collator",
+ "cumulus-client-consensus-aura",
+ "cumulus-client-consensus-common",
+ "cumulus-client-network",
+ "cumulus-client-service",
+ "cumulus-primitives-core",
+ "cumulus-primitives-parachain-inherent",
+ "derive_more",
+ "frame-benchmarking",
+ "frame-benchmarking-cli",
+ "hex-literal 0.3.3",
+ "jsonrpc-core 18.0.0",
+ "log",
+ "pallet-transaction-payment-rpc",
+ "parity-scale-codec",
+ "polkadot-cli",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "polkadot-service",
+ "polkadot-test-service",
+ "rialto-parachain-runtime",
+ "sc-basic-authorship",
+ "sc-chain-spec",
+ "sc-cli",
+ "sc-client-api",
+ "sc-consensus",
+ "sc-executor",
+ "sc-keystore",
+ "sc-network",
+ "sc-rpc",
+ "sc-rpc-api",
+ "sc-service",
+ "sc-telemetry",
+ "sc-tracing",
+ "sc-transaction-pool",
+ "serde",
+ "sp-api",
+ "sp-block-builder",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-aura",
+ "sp-core",
+ "sp-inherents",
+ "sp-keystore",
+ "sp-offchain",
  "sp-runtime",
+ "sp-session",
  "sp-timestamp",
+ "sp-transaction-pool",
  "structopt",
  "substrate-build-script-utils",
  "substrate-frame-rpc-system",
+ "substrate-prometheus-endpoint",
+]
+
+[[package]]
+name = "rialto-parachain-runtime"
+version = "0.1.0"
+dependencies = [
+ "bp-rialto-parachain",
+ "cumulus-pallet-aura-ext",
+ "cumulus-pallet-dmp-queue",
+ "cumulus-pallet-parachain-system",
+ "cumulus-pallet-xcm",
+ "cumulus-pallet-xcmp-queue",
+ "cumulus-primitives-core",
+ "cumulus-primitives-timestamp",
+ "cumulus-primitives-utility",
+ "frame-benchmarking",
+ "frame-executive",
+ "frame-support",
+ "frame-system",
+ "frame-system-benchmarking",
+ "frame-system-rpc-runtime-api",
+ "log",
+ "pallet-aura",
+ "pallet-balances",
+ "pallet-randomness-collective-flip",
+ "pallet-sudo",
+ "pallet-timestamp",
+ "pallet-transaction-payment",
+ "pallet-transaction-payment-rpc-runtime-api",
+ "pallet-xcm",
+ "parachain-info",
+ "parity-scale-codec",
+ "polkadot-parachain",
+ "scale-info",
+ "serde",
+ "sp-api",
+ "sp-block-builder",
+ "sp-consensus-aura",
+ "sp-core",
+ "sp-inherents",
+ "sp-io",
+ "sp-offchain",
+ "sp-runtime",
+ "sp-session",
+ "sp-std",
+ "sp-transaction-pool",
+ "sp-version",
+ "substrate-wasm-builder",
+ "xcm",
+ "xcm-builder",
+ "xcm-executor",
 ]
 
 [[package]]
@@ -6185,10 +8694,11 @@ dependencies = [
  "frame-support",
  "frame-system",
  "frame-system-rpc-runtime-api",
- "hex-literal 0.3.1",
- "libsecp256k1",
+ "hex-literal 0.3.3",
+ "libsecp256k1 0.7.0",
  "log",
- "pallet-aura",
+ "pallet-authority-discovery",
+ "pallet-babe",
  "pallet-balances",
  "pallet-bridge-currency-exchange",
  "pallet-bridge-dispatch",
@@ -6196,7 +8706,6 @@ dependencies = [
  "pallet-bridge-grandpa",
  "pallet-bridge-messages",
  "pallet-grandpa",
- "pallet-randomness-collective-flip",
  "pallet-session",
  "pallet-shift-session-manager",
  "pallet-sudo",
@@ -6204,10 +8713,15 @@ dependencies = [
  "pallet-transaction-payment",
  "pallet-transaction-payment-rpc-runtime-api",
  "parity-scale-codec",
+ "polkadot-primitives",
+ "polkadot-runtime-common",
+ "polkadot-runtime-parachains",
+ "scale-info",
  "serde",
  "sp-api",
+ "sp-authority-discovery",
  "sp-block-builder",
- "sp-consensus-aura",
+ "sp-consensus-babe",
  "sp-core",
  "sp-finality-grandpa",
  "sp-inherents",
@@ -6239,19 +8753,19 @@ dependencies = [
 
 [[package]]
 name = "rlp"
-version = "0.5.0"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e54369147e3e7796c9b885c7304db87ca3d09a0a98f72843d532868675bbfba8"
+checksum = "999508abb0ae792aabed2460c45b89106d97fe4adac593bdaef433c2605847b5"
 dependencies = [
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "rustc-hex",
 ]
 
 [[package]]
 name = "rocksdb"
-version = "0.15.0"
+version = "0.17.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23d83c02c429044d58474eaf5ae31e062d0de894e21125b47437ec0edc1397e6"
+checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7"
 dependencies = [
  "libc",
  "librocksdb-sys",
@@ -6259,31 +8773,19 @@ dependencies = [
 
 [[package]]
 name = "rpassword"
-version = "5.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb"
-dependencies = [
- "libc",
- "winapi 0.3.9",
-]
-
-[[package]]
-name = "rust-argon2"
-version = "0.8.3"
+version = "5.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb"
+checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb"
 dependencies = [
- "base64 0.13.0",
- "blake2b_simd",
- "constant_time_eq",
- "crossbeam-utils 0.8.3",
+ "libc",
+ "winapi 0.3.9",
 ]
 
 [[package]]
 name = "rustc-demangle"
-version = "0.1.18"
+version = "0.1.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
 
 [[package]]
 name = "rustc-hash"
@@ -6307,61 +8809,47 @@ dependencies = [
 ]
 
 [[package]]
-name = "rustls"
-version = "0.18.1"
+name = "rustc_version"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81"
+checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee"
 dependencies = [
- "base64 0.12.3",
- "log",
- "ring",
- "sct",
- "webpki 0.21.4",
+ "semver 0.11.0",
 ]
 
 [[package]]
 name = "rustls"
-version = "0.19.0"
+version = "0.19.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b"
+checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7"
 dependencies = [
  "base64 0.13.0",
  "log",
  "ring",
  "sct",
- "webpki 0.21.4",
+ "webpki",
 ]
 
 [[package]]
 name = "rustls-native-certs"
-version = "0.4.0"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8"
+checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092"
 dependencies = [
  "openssl-probe",
- "rustls 0.18.1",
+ "rustls",
  "schannel",
  "security-framework",
 ]
 
-[[package]]
-name = "ruzstd"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d425143485a37727c7a46e689bbe3b883a00f42b4a52c4ac0f44855c1009b00"
-dependencies = [
- "byteorder",
- "twox-hash",
-]
-
 [[package]]
 name = "rw-stream-sink"
 version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020"
 dependencies = [
- "futures 0.3.13",
- "pin-project 0.4.27",
+ "futures 0.3.17",
+ "pin-project 0.4.28",
  "static_assertions",
 ]
 
@@ -6377,14 +8865,14 @@ version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6d3d055a2582e6b00ed7a31c1524040aa391092bf636328350813f3a0605215c"
 dependencies = [
- "rustc_version",
+ "rustc_version 0.2.3",
 ]
 
 [[package]]
 name = "salsa20"
-version = "0.7.2"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "399f290ffc409596022fce5ea5d4138184be4784f2b28c62c59f0d8389059a15"
+checksum = "ecbd2eb639fd7cab5804a0837fe373cc2172d15437e804c054a9fb885cb923b0"
 dependencies = [
  "cipher",
 ]
@@ -6398,12 +8886,50 @@ dependencies = [
  "winapi-util",
 ]
 
+[[package]]
+name = "sc-allocator"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "log",
+ "sp-core",
+ "sp-wasm-interface",
+ "thiserror",
+]
+
+[[package]]
+name = "sc-authority-discovery"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "async-trait",
+ "derive_more",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "ip_network",
+ "libp2p",
+ "log",
+ "parity-scale-codec",
+ "prost",
+ "prost-build",
+ "rand 0.7.3",
+ "sc-client-api",
+ "sc-network",
+ "sp-api",
+ "sp-authority-discovery",
+ "sp-blockchain",
+ "sp-core",
+ "sp-keystore",
+ "sp-runtime",
+ "substrate-prometheus-endpoint",
+]
+
 [[package]]
 name = "sc-basic-authorship"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "log",
  "parity-scale-codec",
@@ -6411,20 +8937,20 @@ dependencies = [
  "sc-client-api",
  "sc-proposer-metrics",
  "sc-telemetry",
+ "sc-transaction-pool-api",
  "sp-api",
  "sp-blockchain",
  "sp-consensus",
  "sp-core",
  "sp-inherents",
  "sp-runtime",
- "sp-transaction-pool",
  "substrate-prometheus-endpoint",
 ]
 
 [[package]]
 name = "sc-block-builder"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
  "sc-client-api",
@@ -6439,31 +8965,26 @@ dependencies = [
 
 [[package]]
 name = "sc-chain-spec"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "impl-trait-for-tuples",
  "parity-scale-codec",
  "sc-chain-spec-derive",
- "sc-consensus-babe",
- "sc-consensus-epochs",
- "sc-finality-grandpa",
  "sc-network",
  "sc-telemetry",
  "serde",
  "serde_json",
- "sp-chain-spec",
- "sp-consensus-babe",
  "sp-core",
  "sp-runtime",
 ]
 
 [[package]]
 name = "sc-chain-spec-derive"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -6471,12 +8992,12 @@ dependencies = [
 
 [[package]]
 name = "sc-cli"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "chrono",
  "fdlimit",
- "futures 0.3.13",
+ "futures 0.3.17",
  "hex",
  "libp2p",
  "log",
@@ -6491,6 +9012,7 @@ dependencies = [
  "sc-service",
  "sc-telemetry",
  "sc-tracing",
+ "sc-utils",
  "serde",
  "serde_json",
  "sp-blockchain",
@@ -6499,104 +9021,103 @@ dependencies = [
  "sp-keystore",
  "sp-panic-handler",
  "sp-runtime",
- "sp-utils",
  "sp-version",
  "structopt",
  "thiserror",
  "tiny-bip39",
- "tokio 0.2.25",
+ "tokio",
 ]
 
 [[package]]
 name = "sc-client-api"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "derive_more",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "hash-db",
- "kvdb",
- "lazy_static",
  "log",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sc-executor",
+ "sc-transaction-pool-api",
+ "sc-utils",
  "sp-api",
  "sp-blockchain",
  "sp-consensus",
  "sp-core",
  "sp-database",
  "sp-externalities",
- "sp-inherents",
  "sp-keystore",
  "sp-runtime",
  "sp-state-machine",
- "sp-std",
  "sp-storage",
- "sp-transaction-pool",
  "sp-trie",
- "sp-utils",
- "sp-version",
  "substrate-prometheus-endpoint",
 ]
 
 [[package]]
 name = "sc-client-db"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "blake2-rfc",
  "hash-db",
  "kvdb",
  "kvdb-memorydb",
- "kvdb-rocksdb",
+ "kvdb-rocksdb 0.14.0",
  "linked-hash-map",
  "log",
  "parity-db",
  "parity-scale-codec",
- "parity-util-mem",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sc-client-api",
- "sc-executor",
  "sc-state-db",
  "sp-arithmetic",
  "sp-blockchain",
- "sp-consensus",
  "sp-core",
  "sp-database",
  "sp-runtime",
  "sp-state-machine",
  "sp-trie",
- "substrate-prometheus-endpoint",
 ]
 
 [[package]]
 name = "sc-consensus"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
- "parking_lot 0.11.1",
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "libp2p",
+ "log",
+ "parking_lot 0.11.2",
  "sc-client-api",
+ "sc-utils",
+ "serde",
+ "sp-api",
  "sp-blockchain",
  "sp-consensus",
+ "sp-core",
  "sp-runtime",
+ "sp-state-machine",
+ "substrate-prometheus-endpoint",
+ "thiserror",
 ]
 
 [[package]]
 name = "sc-consensus-aura"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "derive_more",
- "futures 0.3.13",
- "futures-timer 3.0.2",
+ "futures 0.3.17",
  "log",
  "parity-scale-codec",
  "sc-block-builder",
  "sc-client-api",
+ "sc-consensus",
  "sc-consensus-slots",
  "sc-telemetry",
  "sp-api",
@@ -6608,37 +9129,33 @@ dependencies = [
  "sp-consensus-slots",
  "sp-core",
  "sp-inherents",
- "sp-io",
  "sp-keystore",
  "sp-runtime",
- "sp-version",
  "substrate-prometheus-endpoint",
 ]
 
 [[package]]
 name = "sc-consensus-babe"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "derive_more",
  "fork-tree",
- "futures 0.3.13",
- "futures-timer 3.0.2",
+ "futures 0.3.17",
  "log",
  "merlin",
  "num-bigint",
- "num-rational",
+ "num-rational 0.2.4",
  "num-traits",
  "parity-scale-codec",
- "parking_lot 0.11.1",
- "pdqselect",
+ "parking_lot 0.11.2",
  "rand 0.7.3",
  "retain_mut",
  "sc-client-api",
+ "sc-consensus",
  "sc-consensus-epochs",
  "sc-consensus-slots",
- "sc-consensus-uncles",
  "sc-keystore",
  "sc-telemetry",
  "schnorrkel",
@@ -6656,15 +9173,38 @@ dependencies = [
  "sp-io",
  "sp-keystore",
  "sp-runtime",
- "sp-utils",
  "sp-version",
  "substrate-prometheus-endpoint",
 ]
 
+[[package]]
+name = "sc-consensus-babe-rpc"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "derive_more",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "sc-consensus-babe",
+ "sc-consensus-epochs",
+ "sc-rpc-api",
+ "serde",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-consensus-babe",
+ "sp-core",
+ "sp-keystore",
+ "sp-runtime",
+]
+
 [[package]]
 name = "sc-consensus-epochs"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "fork-tree",
  "parity-scale-codec",
@@ -6676,19 +9216,18 @@ dependencies = [
 
 [[package]]
 name = "sc-consensus-slots"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "impl-trait-for-tuples",
  "log",
  "parity-scale-codec",
  "sc-client-api",
+ "sc-consensus",
  "sc-telemetry",
  "sp-api",
- "sp-application-crypto",
  "sp-arithmetic",
  "sp-blockchain",
  "sp-consensus",
@@ -6698,14 +9237,13 @@ dependencies = [
  "sp-runtime",
  "sp-state-machine",
  "sp-timestamp",
- "sp-trie",
  "thiserror",
 ]
 
 [[package]]
 name = "sc-consensus-uncles"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "sc-client-api",
  "sp-authorship",
@@ -6715,16 +9253,14 @@ dependencies = [
 
 [[package]]
 name = "sc-executor"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "derive_more",
  "lazy_static",
- "libsecp256k1",
+ "libsecp256k1 0.6.0",
  "log",
  "parity-scale-codec",
- "parity-wasm 0.42.2",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sc-executor-common",
  "sc-executor-wasmi",
  "sc-executor-wasmtime",
@@ -6734,7 +9270,6 @@ dependencies = [
  "sp-io",
  "sp-panic-handler",
  "sp-runtime-interface",
- "sp-serializer",
  "sp-tasks",
  "sp-trie",
  "sp-version",
@@ -6744,13 +9279,14 @@ dependencies = [
 
 [[package]]
 name = "sc-executor-common"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "derive_more",
+ "environmental",
  "parity-scale-codec",
  "pwasm-utils",
- "sp-allocator",
+ "sc-allocator",
  "sp-core",
  "sp-maybe-compressed-blob",
  "sp-serializer",
@@ -6761,13 +9297,14 @@ dependencies = [
 
 [[package]]
 name = "sc-executor-wasmi"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "log",
  "parity-scale-codec",
+ "sc-allocator",
  "sc-executor-common",
- "sp-allocator",
+ "scoped-tls",
  "sp-core",
  "sp-runtime-interface",
  "sp-wasm-interface",
@@ -6776,17 +9313,16 @@ dependencies = [
 
 [[package]]
 name = "sc-executor-wasmtime"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "cfg-if 1.0.0",
  "libc",
  "log",
  "parity-scale-codec",
  "parity-wasm 0.42.2",
+ "sc-allocator",
  "sc-executor-common",
- "scoped-tls",
- "sp-allocator",
  "sp-core",
  "sp-runtime-interface",
  "sp-wasm-interface",
@@ -6795,22 +9331,20 @@ dependencies = [
 
 [[package]]
 name = "sc-finality-grandpa"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "derive_more",
  "dyn-clone",
  "finality-grandpa",
  "fork-tree",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "linked-hash-map",
  "log",
  "parity-scale-codec",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
- "rand 0.7.3",
+ "parking_lot 0.11.2",
+ "rand 0.8.4",
  "sc-block-builder",
  "sc-client-api",
  "sc-consensus",
@@ -6818,6 +9352,7 @@ dependencies = [
  "sc-network",
  "sc-network-gossip",
  "sc-telemetry",
+ "sc-utils",
  "serde_json",
  "sp-api",
  "sp-application-crypto",
@@ -6826,23 +9361,20 @@ dependencies = [
  "sp-consensus",
  "sp-core",
  "sp-finality-grandpa",
- "sp-inherents",
  "sp-keystore",
  "sp-runtime",
- "sp-utils",
  "substrate-prometheus-endpoint",
- "wasm-timer",
 ]
 
 [[package]]
 name = "sc-finality-grandpa-rpc"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "derive_more",
  "finality-grandpa",
- "futures 0.3.13",
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-core-client",
  "jsonrpc-derive",
  "jsonrpc-pubsub",
@@ -6860,51 +9392,44 @@ dependencies = [
 
 [[package]]
 name = "sc-informant"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "ansi_term 0.12.1",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "log",
  "parity-util-mem",
  "sc-client-api",
  "sc-network",
+ "sc-transaction-pool-api",
  "sp-blockchain",
  "sp-runtime",
- "sp-transaction-pool",
- "wasm-timer",
 ]
 
 [[package]]
 name = "sc-keystore"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "derive_more",
- "futures 0.3.13",
- "futures-util",
  "hex",
- "merlin",
- "parking_lot 0.11.1",
- "rand 0.7.3",
+ "parking_lot 0.11.2",
  "serde_json",
  "sp-application-crypto",
  "sp-core",
  "sp-keystore",
- "subtle 2.4.0",
 ]
 
 [[package]]
 name = "sc-light"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "hash-db",
- "lazy_static",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sc-client-api",
  "sc-executor",
  "sp-api",
@@ -6917,22 +9442,20 @@ dependencies = [
 
 [[package]]
 name = "sc-network"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-std",
  "async-trait",
  "asynchronous-codec 0.5.0",
  "bitflags",
- "bs58",
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "cid",
  "derive_more",
  "either",
- "erased-serde",
  "fnv",
  "fork-tree",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "hex",
  "ip_network",
@@ -6940,96 +9463,94 @@ dependencies = [
  "linked-hash-map",
  "linked_hash_set",
  "log",
- "lru",
- "nohash-hasher",
+ "lru 0.6.6",
  "parity-scale-codec",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
+ "parking_lot 0.11.2",
+ "pin-project 1.0.8",
  "prost",
  "prost-build",
  "rand 0.7.3",
  "sc-block-builder",
  "sc-client-api",
+ "sc-consensus",
  "sc-peerset",
+ "sc-utils",
  "serde",
  "serde_json",
- "smallvec 1.6.1",
+ "smallvec",
  "sp-arithmetic",
  "sp-blockchain",
  "sp-consensus",
  "sp-core",
+ "sp-finality-grandpa",
  "sp-runtime",
- "sp-utils",
  "substrate-prometheus-endpoint",
  "thiserror",
  "unsigned-varint 0.6.0",
  "void",
- "wasm-timer",
  "zeroize",
 ]
 
 [[package]]
 name = "sc-network-gossip"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "libp2p",
  "log",
- "lru",
+ "lru 0.6.6",
  "sc-network",
  "sp-runtime",
  "substrate-prometheus-endpoint",
  "tracing",
- "wasm-timer",
 ]
 
 [[package]]
 name = "sc-offchain"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "bytes 0.5.6",
+ "bytes 1.1.0",
  "fnv",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "hex",
- "hyper 0.13.10",
+ "hyper",
  "hyper-rustls",
  "log",
  "num_cpus",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "rand 0.7.3",
  "sc-client-api",
- "sc-keystore",
  "sc-network",
+ "sc-utils",
  "sp-api",
  "sp-core",
  "sp-offchain",
  "sp-runtime",
- "sp-utils",
  "threadpool",
 ]
 
 [[package]]
 name = "sc-peerset"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p",
  "log",
+ "sc-utils",
  "serde_json",
- "sp-utils",
  "wasm-timer",
 ]
 
 [[package]]
 name = "sc-proposer-metrics"
 version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "log",
  "substrate-prometheus-endpoint",
@@ -7037,107 +9558,101 @@ dependencies = [
 
 [[package]]
 name = "sc-rpc"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "hash-db",
- "jsonrpc-core 15.1.0",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-pubsub",
  "log",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sc-block-builder",
+ "sc-chain-spec",
  "sc-client-api",
- "sc-executor",
- "sc-keystore",
  "sc-rpc-api",
  "sc-tracing",
+ "sc-transaction-pool-api",
+ "sc-utils",
  "serde_json",
  "sp-api",
  "sp-blockchain",
- "sp-chain-spec",
  "sp-core",
  "sp-keystore",
  "sp-offchain",
  "sp-rpc",
  "sp-runtime",
  "sp-session",
- "sp-state-machine",
- "sp-tracing",
- "sp-transaction-pool",
- "sp-utils",
  "sp-version",
 ]
 
 [[package]]
 name = "sc-rpc-api"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "derive_more",
- "futures 0.3.13",
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-core-client",
  "jsonrpc-derive",
  "jsonrpc-pubsub",
  "log",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
+ "sc-chain-spec",
+ "sc-transaction-pool-api",
  "serde",
  "serde_json",
- "sp-chain-spec",
  "sp-core",
  "sp-rpc",
  "sp-runtime",
  "sp-tracing",
- "sp-transaction-pool",
  "sp-version",
+ "thiserror",
 ]
 
 [[package]]
 name = "sc-rpc-server"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.1.31",
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-http-server",
  "jsonrpc-ipc-server",
  "jsonrpc-pubsub",
  "jsonrpc-ws-server",
  "log",
- "serde",
  "serde_json",
- "sp-runtime",
  "substrate-prometheus-endpoint",
+ "tokio",
 ]
 
 [[package]]
 name = "sc-service"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "directories",
  "exit-future",
- "futures 0.1.31",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
  "hash-db",
- "jsonrpc-core 15.1.0",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-pubsub",
- "lazy_static",
  "log",
  "parity-scale-codec",
  "parity-util-mem",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
+ "parking_lot 0.11.2",
+ "pin-project 1.0.8",
  "rand 0.7.3",
  "sc-block-builder",
  "sc-chain-spec",
  "sc-client-api",
  "sc-client-db",
+ "sc-consensus",
  "sc-executor",
  "sc-informant",
  "sc-keystore",
@@ -7149,6 +9664,8 @@ dependencies = [
  "sc-telemetry",
  "sc-tracing",
  "sc-transaction-pool",
+ "sc-transaction-pool-api",
+ "sc-utils",
  "serde",
  "serde_json",
  "sp-api",
@@ -7159,7 +9676,6 @@ dependencies = [
  "sp-core",
  "sp-externalities",
  "sp-inherents",
- "sp-io",
  "sp-keystore",
  "sp-runtime",
  "sp-session",
@@ -7169,144 +9685,186 @@ dependencies = [
  "sp-transaction-pool",
  "sp-transaction-storage-proof",
  "sp-trie",
- "sp-utils",
  "sp-version",
  "substrate-prometheus-endpoint",
  "tempfile",
  "thiserror",
+ "tokio",
  "tracing",
  "tracing-futures",
- "wasm-timer",
 ]
 
 [[package]]
 name = "sc-state-db"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "log",
  "parity-scale-codec",
  "parity-util-mem",
  "parity-util-mem-derive",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sc-client-api",
  "sp-core",
+]
+
+[[package]]
+name = "sc-sync-state-rpc"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "jsonrpc-core 18.0.0",
+ "jsonrpc-core-client",
+ "jsonrpc-derive",
+ "parity-scale-codec",
+ "sc-chain-spec",
+ "sc-client-api",
+ "sc-consensus-babe",
+ "sc-consensus-epochs",
+ "sc-finality-grandpa",
+ "sc-rpc-api",
+ "serde",
+ "serde_json",
+ "sp-blockchain",
+ "sp-runtime",
  "thiserror",
 ]
 
 [[package]]
 name = "sc-telemetry"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "chrono",
- "futures 0.3.13",
+ "futures 0.3.17",
  "libp2p",
  "log",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
+ "parking_lot 0.11.2",
+ "pin-project 1.0.8",
  "rand 0.7.3",
  "serde",
  "serde_json",
- "take_mut",
  "thiserror",
- "void",
  "wasm-timer",
 ]
 
 [[package]]
 name = "sc-tracing"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "ansi_term 0.12.1",
  "atty",
- "erased-serde",
+ "chrono",
  "lazy_static",
  "log",
  "once_cell",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "regex",
  "rustc-hash",
  "sc-client-api",
  "sc-rpc-server",
- "sc-telemetry",
  "sc-tracing-proc-macro",
  "serde",
- "serde_json",
  "sp-api",
- "sp-block-builder",
  "sp-blockchain",
  "sp-core",
  "sp-rpc",
  "sp-runtime",
- "sp-storage",
  "sp-tracing",
  "thiserror",
  "tracing",
  "tracing-log",
  "tracing-subscriber",
- "wasm-bindgen",
- "wasm-timer",
- "web-sys",
 ]
 
 [[package]]
 name = "sc-tracing-proc-macro"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
 ]
 
 [[package]]
-name = "sc-transaction-graph"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "sc-transaction-pool"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "futures 0.3.17",
+ "intervalier",
+ "linked-hash-map",
+ "log",
+ "parity-scale-codec",
+ "parity-util-mem",
+ "parking_lot 0.11.2",
+ "retain_mut",
+ "sc-client-api",
+ "sc-transaction-pool-api",
+ "sc-utils",
+ "serde",
+ "sp-api",
+ "sp-blockchain",
+ "sp-core",
+ "sp-runtime",
+ "sp-tracing",
+ "sp-transaction-pool",
+ "substrate-prometheus-endpoint",
+ "thiserror",
+]
+
+[[package]]
+name = "sc-transaction-pool-api"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "derive_more",
+ "futures 0.3.17",
+ "log",
+ "serde",
+ "sp-blockchain",
+ "sp-runtime",
+ "thiserror",
+]
+
+[[package]]
+name = "sc-utils"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "futures 0.3.17",
+ "futures-timer 3.0.2",
+ "lazy_static",
+ "prometheus",
+]
+
+[[package]]
+name = "scale-info"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f"
 dependencies = [
+ "bitvec 0.20.4",
+ "cfg-if 1.0.0",
  "derive_more",
- "futures 0.3.13",
- "linked-hash-map",
- "log",
- "parity-util-mem",
- "parking_lot 0.11.1",
- "retain_mut",
+ "parity-scale-codec",
+ "scale-info-derive",
  "serde",
- "sp-blockchain",
- "sp-core",
- "sp-runtime",
- "sp-transaction-pool",
- "sp-utils",
- "thiserror",
- "wasm-timer",
 ]
 
 [[package]]
-name = "sc-transaction-pool"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "scale-info-derive"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd"
 dependencies = [
- "futures 0.3.13",
- "intervalier",
- "log",
- "parity-scale-codec",
- "parity-util-mem",
- "parking_lot 0.11.1",
- "sc-client-api",
- "sc-transaction-graph",
- "sp-api",
- "sp-blockchain",
- "sp-core",
- "sp-runtime",
- "sp-tracing",
- "sp-transaction-pool",
- "sp-utils",
- "substrate-prometheus-endpoint",
- "thiserror",
- "wasm-timer",
+ "proc-macro-crate 1.1.0",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
@@ -7327,13 +9885,13 @@ checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862"
 dependencies = [
  "arrayref",
  "arrayvec 0.5.2",
- "curve25519-dalek 2.1.2",
+ "curve25519-dalek 2.1.3",
  "getrandom 0.1.16",
  "merlin",
  "rand 0.7.3",
  "rand_core 0.5.1",
  "sha2 0.8.2",
- "subtle 2.4.0",
+ "subtle",
  "zeroize",
 ]
 
@@ -7371,43 +9929,61 @@ dependencies = [
 
 [[package]]
 name = "sct"
-version = "0.6.0"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c"
+checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce"
 dependencies = [
  "ring",
  "untrusted",
 ]
 
+[[package]]
+name = "secp256k1"
+version = "0.20.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a"
+dependencies = [
+ "secp256k1-sys",
+]
+
+[[package]]
+name = "secp256k1-sys"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "827cb7cce42533829c792fc51b82fbf18b125b45a702ef2c8be77fce65463a7b"
+dependencies = [
+ "cc",
+]
+
 [[package]]
 name = "secrecy"
-version = "0.7.0"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0"
+checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e"
 dependencies = [
  "zeroize",
 ]
 
 [[package]]
 name = "security-framework"
-version = "1.0.0"
+version = "2.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad502866817f0575705bd7be36e2b2535cc33262d493aa733a2ec862baa2bc2b"
+checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87"
 dependencies = [
  "bitflags",
  "core-foundation",
- "core-foundation-sys 0.7.0",
+ "core-foundation-sys",
  "libc",
  "security-framework-sys",
 ]
 
 [[package]]
 name = "security-framework-sys"
-version = "1.0.0"
+version = "2.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "51ceb04988b17b6d1dcd555390fa822ca5637b4a14e1f5099f13d351bed4d6c7"
+checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e"
 dependencies = [
- "core-foundation-sys 0.7.0",
+ "core-foundation-sys",
  "libc",
 ]
 
@@ -7456,9 +10032,9 @@ dependencies = [
 
 [[package]]
 name = "serde"
-version = "1.0.124"
+version = "1.0.130"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd761ff957cb2a45fbb9ab3da6512de9de55872866160b23c25f1a841e99d29f"
+checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913"
 dependencies = [
  "serde_derive",
 ]
@@ -7475,9 +10051,9 @@ dependencies = [
 
 [[package]]
 name = "serde_derive"
-version = "1.0.124"
+version = "1.0.130"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1800f7693e94e186f5e25a28291ae1570da908aff7d97a095dec1e56ff99069b"
+checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -7486,9 +10062,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.64"
+version = "1.0.68"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79"
+checksum = "0f690853975602e1bfe1ccbf50504d67174e3bcf340f23b5ea9992e0587a52d8"
 dependencies = [
  "indexmap",
  "itoa",
@@ -7496,6 +10072,18 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "serde_urlencoded"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9"
+dependencies = [
+ "form_urlencoded",
+ "itoa",
+ "ryu",
+ "serde",
+]
+
 [[package]]
 name = "sha-1"
 version = "0.8.2"
@@ -7510,13 +10098,13 @@ dependencies = [
 
 [[package]]
 name = "sha-1"
-version = "0.9.4"
+version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f"
+checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6"
 dependencies = [
  "block-buffer 0.9.0",
  "cfg-if 1.0.0",
- "cpuid-bool 0.1.2",
+ "cpufeatures 0.2.1",
  "digest 0.9.0",
  "opaque-debug 0.3.0",
 ]
@@ -7541,13 +10129,13 @@ dependencies = [
 
 [[package]]
 name = "sha2"
-version = "0.9.3"
+version = "0.9.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de"
+checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa"
 dependencies = [
  "block-buffer 0.9.0",
  "cfg-if 1.0.0",
- "cpuid-bool 0.1.2",
+ "cpufeatures 0.2.1",
  "digest 0.9.0",
  "opaque-debug 0.3.0",
 ]
@@ -7566,24 +10154,24 @@ dependencies = [
 
 [[package]]
 name = "sharded-slab"
-version = "0.1.1"
+version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3"
+checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982"
 dependencies = [
  "lazy_static",
 ]
 
 [[package]]
 name = "shlex"
-version = "0.1.1"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
+checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3"
 
 [[package]]
 name = "signal-hook"
-version = "0.3.6"
+version = "0.3.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a7f3f92a1da3d6b1d32245d0cbcbbab0cfc45996d8df619c42bccfa6d2bbb5f"
+checksum = "9c98891d737e271a2954825ef19e46bd16bdb98e2746f2eec4f7a4ef7946efd1"
 dependencies = [
  "libc",
  "signal-hook-registry",
@@ -7591,36 +10179,36 @@ dependencies = [
 
 [[package]]
 name = "signal-hook-registry"
-version = "1.3.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6"
+checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0"
 dependencies = [
  "libc",
 ]
 
 [[package]]
 name = "signature"
-version = "1.3.0"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68"
+checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335"
 
 [[package]]
 name = "simba"
-version = "0.1.5"
+version = "0.5.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2"
+checksum = "8e82063457853d00243beda9952e910b82593e4b07ae9f721b9278a99a0d3d5c"
 dependencies = [
  "approx",
  "num-complex",
  "num-traits",
- "paste 0.1.18",
+ "paste",
 ]
 
 [[package]]
 name = "slab"
-version = "0.4.2"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
+checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590"
 
 [[package]]
 name = "slog"
@@ -7631,47 +10219,65 @@ dependencies = [
  "erased-serde",
 ]
 
+[[package]]
+name = "slot-range-helper"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "enumn",
+ "parity-scale-codec",
+ "paste",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "slotmap"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342"
+dependencies = [
+ "version_check",
+]
+
 [[package]]
 name = "sluice"
-version = "0.5.4"
+version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fa0333a60ff2e3474a6775cc611840c2a55610c831dd366503474c02f1a28f5"
+checksum = "6d7400c0eff44aa2fcb5e31a5f24ba9716ed90138769e4977a2ba6014ae63eb5"
 dependencies = [
- "futures-channel",
+ "async-channel",
  "futures-core",
  "futures-io",
 ]
 
 [[package]]
 name = "smallvec"
-version = "0.6.14"
+version = "1.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0"
-dependencies = [
- "maybe-uninit",
-]
+checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309"
 
 [[package]]
-name = "smallvec"
-version = "1.6.1"
+name = "snap"
+version = "1.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e"
+checksum = "45456094d1983e2ee2a18fdfebce3189fa451699d0502cb8e3b49dba5ba41451"
 
 [[package]]
 name = "snow"
-version = "0.7.2"
+version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "795dd7aeeee24468e5a32661f6d27f7b5cbed802031b2d7640c7b10f8fb2dd50"
+checksum = "6142f7c25e94f6fd25a32c3348ec230df9109b463f59c8c7acc4bd34936babb7"
 dependencies = [
  "aes-gcm",
  "blake2",
  "chacha20poly1305",
- "rand 0.7.3",
- "rand_core 0.5.1",
+ "rand 0.8.4",
+ "rand_core 0.6.3",
  "ring",
- "rustc_version",
- "sha2 0.9.3",
- "subtle 2.4.0",
+ "rustc_version 0.3.3",
+ "sha2 0.9.8",
+ "subtle",
  "x25519-dalek",
 ]
 
@@ -7688,9 +10294,9 @@ dependencies = [
 
 [[package]]
 name = "socket2"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2"
+checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad"
 dependencies = [
  "libc",
  "winapi 0.3.9",
@@ -7705,29 +10311,47 @@ dependencies = [
  "base64 0.12.3",
  "bytes 0.5.6",
  "flate2",
- "futures 0.3.13",
+ "futures 0.3.17",
  "httparse",
  "log",
  "rand 0.7.3",
- "sha-1 0.9.4",
+ "sha-1 0.9.8",
 ]
 
 [[package]]
-name = "sp-allocator"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+name = "soketto"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4919971d141dbadaa0e82b5d369e2d7666c98e4625046140615ca363e50d4daa"
 dependencies = [
+ "base64 0.13.0",
+ "bytes 1.1.0",
+ "futures 0.3.17",
+ "httparse",
  "log",
- "sp-core",
- "sp-std",
- "sp-wasm-interface",
- "thiserror",
+ "rand 0.8.4",
+ "sha-1 0.9.8",
+]
+
+[[package]]
+name = "soketto"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a74e48087dbeed4833785c2f3352b59140095dc192dce966a3bfc155020a439f"
+dependencies = [
+ "base64 0.13.0",
+ "bytes 1.1.0",
+ "futures 0.3.17",
+ "httparse",
+ "log",
+ "rand 0.8.4",
+ "sha-1 0.9.8",
 ]
 
 [[package]]
 name = "sp-api"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "hash-db",
  "log",
@@ -7743,11 +10367,11 @@ dependencies = [
 
 [[package]]
 name = "sp-api-proc-macro"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "blake2-rfc",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -7755,11 +10379,11 @@ dependencies = [
 
 [[package]]
 name = "sp-application-crypto"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "max-encoded-len",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-core",
  "sp-io",
@@ -7768,22 +10392,36 @@ dependencies = [
 
 [[package]]
 name = "sp-arithmetic"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "integer-sqrt",
  "num-traits",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-debug-derive",
  "sp-std",
  "static_assertions",
 ]
 
+[[package]]
+name = "sp-authority-discovery"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "parity-scale-codec",
+ "scale-info",
+ "sp-api",
+ "sp-application-crypto",
+ "sp-runtime",
+ "sp-std",
+]
+
 [[package]]
 name = "sp-authorship"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "parity-scale-codec",
@@ -7794,8 +10432,8 @@ dependencies = [
 
 [[package]]
 name = "sp-block-builder"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
  "sp-api",
@@ -7806,14 +10444,14 @@ dependencies = [
 
 [[package]]
 name = "sp-blockchain"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "log",
- "lru",
+ "lru 0.6.6",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sp-api",
  "sp-consensus",
  "sp-database",
@@ -7822,49 +10460,33 @@ dependencies = [
  "thiserror",
 ]
 
-[[package]]
-name = "sp-chain-spec"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
-dependencies = [
- "serde",
- "serde_json",
-]
-
 [[package]]
 name = "sp-consensus"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
- "libp2p",
  "log",
  "parity-scale-codec",
- "parking_lot 0.11.1",
- "serde",
- "sp-api",
  "sp-core",
  "sp-inherents",
  "sp-runtime",
  "sp-state-machine",
  "sp-std",
- "sp-trie",
- "sp-utils",
  "sp-version",
- "substrate-prometheus-endpoint",
  "thiserror",
- "wasm-timer",
 ]
 
 [[package]]
 name = "sp-consensus-aura"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "parity-scale-codec",
+ "scale-info",
  "sp-api",
  "sp-application-crypto",
  "sp-consensus",
@@ -7877,12 +10499,13 @@ dependencies = [
 
 [[package]]
 name = "sp-consensus-babe"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "merlin",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-api",
  "sp-application-crypto",
@@ -7899,18 +10522,19 @@ dependencies = [
 
 [[package]]
 name = "sp-consensus-slots"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
+ "scale-info",
  "sp-arithmetic",
  "sp-runtime",
 ]
 
 [[package]]
 name = "sp-consensus-vrf"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
  "schnorrkel",
@@ -7921,35 +10545,35 @@ dependencies = [
 
 [[package]]
 name = "sp-core"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "base58",
  "blake2-rfc",
  "byteorder",
  "dyn-clonable",
  "ed25519-dalek",
- "futures 0.3.13",
+ "futures 0.3.17",
  "hash-db",
  "hash256-std-hasher",
  "hex",
  "impl-serde",
  "lazy_static",
- "libsecp256k1",
+ "libsecp256k1 0.6.0",
  "log",
- "max-encoded-len",
  "merlin",
  "num-traits",
  "parity-scale-codec",
  "parity-util-mem",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "primitive-types",
  "rand 0.7.3",
  "regex",
+ "scale-info",
  "schnorrkel",
  "secrecy",
  "serde",
- "sha2 0.9.3",
+ "sha2 0.9.8",
  "sp-debug-derive",
  "sp-externalities",
  "sp-runtime-interface",
@@ -7966,17 +10590,17 @@ dependencies = [
 
 [[package]]
 name = "sp-database"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "kvdb",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
 ]
 
 [[package]]
 name = "sp-debug-derive"
 version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -7985,8 +10609,8 @@ dependencies = [
 
 [[package]]
 name = "sp-externalities"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "environmental",
  "parity-scale-codec",
@@ -7996,12 +10620,13 @@ dependencies = [
 
 [[package]]
 name = "sp-finality-grandpa"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "finality-grandpa",
  "log",
  "parity-scale-codec",
+ "scale-info",
  "serde",
  "sp-api",
  "sp-application-crypto",
@@ -8013,8 +10638,8 @@ dependencies = [
 
 [[package]]
 name = "sp-inherents"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "impl-trait-for-tuples",
@@ -8027,19 +10652,18 @@ dependencies = [
 
 [[package]]
 name = "sp-io"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "hash-db",
- "libsecp256k1",
+ "libsecp256k1 0.6.0",
  "log",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "sp-core",
  "sp-externalities",
  "sp-keystore",
- "sp-maybe-compressed-blob",
  "sp-runtime-interface",
  "sp-state-machine",
  "sp-std",
@@ -8052,26 +10676,26 @@ dependencies = [
 
 [[package]]
 name = "sp-keyring"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "lazy_static",
  "sp-core",
  "sp-runtime",
- "strum",
+ "strum 0.20.0",
 ]
 
 [[package]]
 name = "sp-keystore"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "derive_more",
- "futures 0.3.13",
+ "futures 0.3.17",
  "merlin",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "schnorrkel",
  "serde",
  "sp-core",
@@ -8080,17 +10704,42 @@ dependencies = [
 
 [[package]]
 name = "sp-maybe-compressed-blob"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "ruzstd",
  "zstd",
 ]
 
+[[package]]
+name = "sp-npos-elections"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "parity-scale-codec",
+ "scale-info",
+ "serde",
+ "sp-arithmetic",
+ "sp-core",
+ "sp-npos-elections-solution-type",
+ "sp-runtime",
+ "sp-std",
+]
+
+[[package]]
+name = "sp-npos-elections-solution-type"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "proc-macro-crate 1.1.0",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "sp-offchain"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "sp-api",
  "sp-core",
@@ -8100,36 +10749,35 @@ dependencies = [
 [[package]]
 name = "sp-panic-handler"
 version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "backtrace",
 ]
 
 [[package]]
 name = "sp-rpc"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "rustc-hash",
  "serde",
  "sp-core",
- "tracing-core",
 ]
 
 [[package]]
 name = "sp-runtime"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "either",
  "hash256-std-hasher",
  "impl-trait-for-tuples",
  "log",
- "max-encoded-len",
  "parity-scale-codec",
  "parity-util-mem",
- "paste 1.0.4",
+ "paste",
  "rand 0.7.3",
+ "scale-info",
  "serde",
  "sp-application-crypto",
  "sp-arithmetic",
@@ -8140,8 +10788,8 @@ dependencies = [
 
 [[package]]
 name = "sp-runtime-interface"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "impl-trait-for-tuples",
  "parity-scale-codec",
@@ -8157,11 +10805,11 @@ dependencies = [
 
 [[package]]
 name = "sp-runtime-interface-proc-macro"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "Inflector",
- "proc-macro-crate 1.0.0",
+ "proc-macro-crate 1.1.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -8170,7 +10818,7 @@ dependencies = [
 [[package]]
 name = "sp-serializer"
 version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "serde",
  "serde_json",
@@ -8178,10 +10826,11 @@ dependencies = [
 
 [[package]]
 name = "sp-session"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
+ "scale-info",
  "sp-api",
  "sp-core",
  "sp-runtime",
@@ -8191,26 +10840,27 @@ dependencies = [
 
 [[package]]
 name = "sp-staking"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
+ "scale-info",
  "sp-runtime",
  "sp-std",
 ]
 
 [[package]]
 name = "sp-state-machine"
-version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "hash-db",
  "log",
  "num-traits",
  "parity-scale-codec",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "rand 0.7.3",
- "smallvec 1.6.1",
+ "smallvec",
  "sp-core",
  "sp-externalities",
  "sp-panic-handler",
@@ -8224,13 +10874,13 @@ dependencies = [
 
 [[package]]
 name = "sp-std"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 
 [[package]]
 name = "sp-storage"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "impl-serde",
  "parity-scale-codec",
@@ -8242,8 +10892,8 @@ dependencies = [
 
 [[package]]
 name = "sp-tasks"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "log",
  "sp-core",
@@ -8255,8 +10905,8 @@ dependencies = [
 
 [[package]]
 name = "sp-timestamp"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "futures-timer 3.0.2",
@@ -8267,13 +10917,12 @@ dependencies = [
  "sp-runtime",
  "sp-std",
  "thiserror",
- "wasm-timer",
 ]
 
 [[package]]
 name = "sp-tracing"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "erased-serde",
  "log",
@@ -8290,28 +10939,22 @@ dependencies = [
 
 [[package]]
 name = "sp-transaction-pool"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
- "derive_more",
- "futures 0.3.13",
- "log",
- "parity-scale-codec",
- "serde",
  "sp-api",
- "sp-blockchain",
  "sp-runtime",
- "thiserror",
 ]
 
 [[package]]
 name = "sp-transaction-storage-proof"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-trait",
  "log",
  "parity-scale-codec",
+ "scale-info",
  "sp-core",
  "sp-inherents",
  "sp-runtime",
@@ -8321,50 +10964,41 @@ dependencies = [
 
 [[package]]
 name = "sp-trie"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "hash-db",
  "memory-db",
  "parity-scale-codec",
+ "scale-info",
  "sp-core",
  "sp-std",
  "trie-db",
  "trie-root",
 ]
 
-[[package]]
-name = "sp-utils"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
-dependencies = [
- "futures 0.3.13",
- "futures-core",
- "futures-timer 3.0.2",
- "lazy_static",
- "prometheus",
-]
-
 [[package]]
 name = "sp-version"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "impl-serde",
  "parity-scale-codec",
+ "parity-wasm 0.42.2",
+ "scale-info",
  "serde",
  "sp-runtime",
  "sp-std",
  "sp-version-proc-macro",
+ "thiserror",
 ]
 
 [[package]]
 name = "sp-version-proc-macro"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "parity-scale-codec",
- "proc-macro-crate 1.0.0",
  "proc-macro2",
  "quote",
  "syn",
@@ -8372,8 +11006,8 @@ dependencies = [
 
 [[package]]
 name = "sp-wasm-interface"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "impl-trait-for-tuples",
  "parity-scale-codec",
@@ -8387,15 +11021,6 @@ version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
 
-[[package]]
-name = "spinning_top"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8bd0ab6b8c375d2d963503b90d3770010d95bc3b5f98036f948dee24bf4e8879"
-dependencies = [
- "lock_api 0.4.2",
-]
-
 [[package]]
 name = "stable_deref_trait"
 version = "1.2.0"
@@ -8417,13 +11042,42 @@ version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
 
+[[package]]
+name = "static_init"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "11b73400442027c4adedda20a9f9b7945234a5bd8d5f7e86da22bd5d0622369c"
+dependencies = [
+ "cfg_aliases",
+ "libc",
+ "parking_lot 0.11.2",
+ "static_init_macro",
+]
+
+[[package]]
+name = "static_init_macro"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f2261c91034a1edc3fc4d1b80e89d82714faede0515c14a75da10cb941546bbf"
+dependencies = [
+ "cfg_aliases",
+ "memchr",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "statrs"
-version = "0.12.0"
+version = "0.15.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382"
+checksum = "05bdbb8e4e78216a85785a85d3ec3183144f98d0097b9281802c019bb07a6f05"
 dependencies = [
- "rand 0.7.3",
+ "approx",
+ "lazy_static",
+ "nalgebra",
+ "num-traits",
+ "rand 0.8.4",
 ]
 
 [[package]]
@@ -8433,7 +11087,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5"
 dependencies = [
  "discard",
- "rustc_version",
+ "rustc_version 0.2.3",
  "stdweb-derive",
  "stdweb-internal-macros",
  "stdweb-internal-runtime",
@@ -8479,45 +11133,16 @@ checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0"
 name = "storage-proof-fuzzer"
 version = "0.1.0"
 dependencies = [
- "bp-header-chain",
  "bp-runtime",
- "bp-test-utils",
- "env_logger 0.8.3",
- "finality-grandpa",
- "frame-support",
- "frame-system",
- "hash-db",
+ "env_logger 0.8.4",
  "honggfuzz",
  "log",
- "parity-scale-codec",
  "sp-core",
- "sp-finality-grandpa",
- "sp-io",
- "sp-runtime",
  "sp-state-machine",
  "sp-std",
  "sp-trie",
 ]
 
-[[package]]
-name = "stream-cipher"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89"
-dependencies = [
- "block-cipher",
- "generic-array 0.14.4",
-]
-
-[[package]]
-name = "string"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d"
-dependencies = [
- "bytes 0.4.12",
-]
-
 [[package]]
 name = "strsim"
 version = "0.8.0"
@@ -8526,9 +11151,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
 
 [[package]]
 name = "structopt"
-version = "0.3.21"
+version = "0.3.23"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c"
+checksum = "bf9d950ef167e25e0bdb073cf1d68e9ad2795ac826f2f3f59647817cf23c0bfa"
 dependencies = [
  "clap",
  "lazy_static",
@@ -8537,12 +11162,12 @@ dependencies = [
 
 [[package]]
 name = "structopt-derive"
-version = "0.4.14"
+version = "0.4.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90"
+checksum = "134d838a2c9943ac3125cf6df165eda53493451b719f3255b2a26b85f772d0ba"
 dependencies = [
  "heck",
- "proc-macro-error",
+ "proc-macro-error 1.0.4",
  "proc-macro2",
  "quote",
  "syn",
@@ -8554,7 +11179,16 @@ version = "0.20.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7318c509b5ba57f18533982607f24070a55d353e90d4cae30c467cdb2ad5ac5c"
 dependencies = [
- "strum_macros",
+ "strum_macros 0.20.1",
+]
+
+[[package]]
+name = "strum"
+version = "0.21.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2"
+dependencies = [
+ "strum_macros 0.21.1",
 ]
 
 [[package]]
@@ -8569,62 +11203,73 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "strum_macros"
+version = "0.21.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d06aaeeee809dbc59eb4556183dd927df67db1540de5be8d3ec0b6636358a5ec"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "substrate-bip39"
-version = "0.4.2"
+version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236"
+checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c"
 dependencies = [
- "hmac 0.7.1",
- "pbkdf2 0.3.0",
+ "hmac 0.11.0",
+ "pbkdf2 0.8.0",
  "schnorrkel",
- "sha2 0.8.2",
+ "sha2 0.9.8",
  "zeroize",
 ]
 
 [[package]]
 name = "substrate-build-script-utils"
 version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "platforms",
 ]
 
 [[package]]
 name = "substrate-frame-rpc-system"
-version = "3.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "4.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "frame-system-rpc-runtime-api",
- "futures 0.3.13",
- "jsonrpc-core 15.1.0",
+ "futures 0.3.17",
+ "jsonrpc-core 18.0.0",
  "jsonrpc-core-client",
  "jsonrpc-derive",
  "log",
  "parity-scale-codec",
  "sc-client-api",
  "sc-rpc-api",
- "serde",
+ "sc-transaction-pool-api",
  "sp-api",
  "sp-block-builder",
  "sp-blockchain",
  "sp-core",
  "sp-runtime",
- "sp-transaction-pool",
 ]
 
 [[package]]
 name = "substrate-prometheus-endpoint"
 version = "0.9.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "async-std",
  "derive_more",
  "futures-util",
- "hyper 0.13.10",
+ "hyper",
  "log",
  "prometheus",
- "tokio 0.2.25",
+ "tokio",
 ]
 
 [[package]]
@@ -8633,7 +11278,6 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "async-std",
- "async-trait",
  "bp-header-chain",
  "bp-kusama",
  "bp-message-dispatch",
@@ -8641,53 +11285,126 @@ dependencies = [
  "bp-millau",
  "bp-polkadot",
  "bp-rialto",
+ "bp-rialto-parachain",
  "bp-rococo",
  "bp-runtime",
+ "bp-token-swap",
  "bp-westend",
  "bp-wococo",
  "bridge-runtime-common",
  "finality-grandpa",
  "finality-relay",
  "frame-support",
- "futures 0.3.13",
- "headers-relay",
+ "futures 0.3.17",
  "hex",
- "hex-literal 0.3.1",
+ "hex-literal 0.3.3",
  "log",
  "messages-relay",
  "millau-runtime",
  "num-format",
  "num-traits",
+ "pallet-balances",
+ "pallet-bridge-dispatch",
  "pallet-bridge-grandpa",
  "pallet-bridge-messages",
+ "pallet-bridge-token-swap",
  "parity-scale-codec",
- "paste 1.0.4",
+ "paste",
+ "polkadot-parachain",
+ "polkadot-primitives",
+ "polkadot-runtime-common",
+ "polkadot-runtime-parachains",
+ "rand 0.8.4",
  "relay-kusama-client",
  "relay-millau-client",
  "relay-polkadot-client",
  "relay-rialto-client",
+ "relay-rialto-parachain-client",
  "relay-rococo-client",
  "relay-substrate-client",
  "relay-utils",
  "relay-westend-client",
  "relay-wococo-client",
+ "rialto-parachain-runtime",
  "rialto-runtime",
  "sp-core",
- "sp-finality-grandpa",
+ "sp-io",
  "sp-keyring",
  "sp-runtime",
- "sp-trie",
  "sp-version",
  "structopt",
+ "strum 0.21.0",
+ "substrate-relay-helper",
+ "tempfile",
+]
+
+[[package]]
+name = "substrate-relay-helper"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "async-std",
+ "async-trait",
+ "bp-header-chain",
+ "bp-messages",
+ "bp-millau",
+ "bp-rococo",
+ "bp-runtime",
+ "bp-wococo",
+ "bridge-runtime-common",
+ "finality-grandpa",
+ "finality-relay",
+ "frame-support",
+ "futures 0.3.17",
+ "log",
+ "messages-relay",
+ "num-traits",
+ "pallet-bridge-messages",
+ "parity-scale-codec",
+ "relay-rococo-client",
+ "relay-substrate-client",
+ "relay-utils",
+ "relay-wococo-client",
+ "rialto-runtime",
+ "sp-core",
+ "sp-finality-grandpa",
+ "sp-runtime",
+ "thiserror",
+]
+
+[[package]]
+name = "substrate-test-client"
+version = "2.0.1"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "async-trait",
+ "futures 0.3.17",
+ "hex",
+ "parity-scale-codec",
+ "sc-client-api",
+ "sc-client-db",
+ "sc-consensus",
+ "sc-executor",
+ "sc-light",
+ "sc-offchain",
+ "sc-service",
+ "serde",
+ "serde_json",
+ "sp-blockchain",
+ "sp-consensus",
+ "sp-core",
+ "sp-keyring",
+ "sp-keystore",
+ "sp-runtime",
+ "sp-state-machine",
 ]
 
 [[package]]
 name = "substrate-wasm-builder"
-version = "4.0.0"
-source = "git+https://github.com/paritytech/substrate?branch=master#550d64cc7e233edf815c215b5329e1171cd59d1d"
+version = "5.0.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
 dependencies = [
  "ansi_term 0.12.1",
- "atty",
  "build-helper",
  "cargo_metadata",
  "sp-maybe-compressed-blob",
@@ -8699,32 +11416,37 @@ dependencies = [
 
 [[package]]
 name = "subtle"
-version = "1.0.0"
+version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee"
+checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
 
 [[package]]
-name = "subtle"
-version = "2.4.0"
+name = "syn"
+version = "1.0.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2"
+checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-xid",
+]
 
 [[package]]
-name = "syn"
-version = "1.0.68"
+name = "syn-mid"
+version = "0.5.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87"
+checksum = "baa8e7560a164edb1621a55d18a0c59abf49d360f47aa7b821061dd7eea7fac9"
 dependencies = [
  "proc-macro2",
  "quote",
- "unicode-xid",
+ "syn",
 ]
 
 [[package]]
 name = "synstructure"
-version = "0.12.4"
+version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
+checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -8740,7 +11462,7 @@ checksum = "de94457a09609f33fec5e7fceaf907488967c6c7c75d64da6a7ce6ffdb8b5abd"
 dependencies = [
  "cc",
  "cfg-if 1.0.0",
- "core-foundation-sys 0.8.2",
+ "core-foundation-sys",
  "doc-comment",
  "libc",
  "ntapi",
@@ -8749,12 +11471,6 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
-[[package]]
-name = "take_mut"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
-
 [[package]]
 name = "tap"
 version = "1.0.1"
@@ -8763,9 +11479,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
 
 [[package]]
 name = "target-lexicon"
-version = "0.12.0"
+version = "0.12.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64ae3b39281e4b14b8123bdbaddd472b7dfe215e444181f2f9d2443c2444f834"
+checksum = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff"
 
 [[package]]
 name = "tempfile"
@@ -8775,8 +11491,8 @@ checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
 dependencies = [
  "cfg-if 1.0.0",
  "libc",
- "rand 0.8.3",
- "redox_syscall 0.2.5",
+ "rand 0.8.4",
+ "redox_syscall 0.2.10",
  "remove_dir_all",
  "winapi 0.3.9",
 ]
@@ -8801,18 +11517,18 @@ dependencies = [
 
 [[package]]
 name = "thiserror"
-version = "1.0.24"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e"
+checksum = "602eca064b2d83369e2b2f34b09c70b605402801927c65c11071ac911d299b88"
 dependencies = [
  "thiserror-impl",
 ]
 
 [[package]]
 name = "thiserror-impl"
-version = "1.0.24"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0"
+checksum = "bad553cc2c78e8de258400763a647e80e6d1b31ee237275d756f6836d204494c"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -8837,6 +11553,19 @@ dependencies = [
  "num_cpus",
 ]
 
+[[package]]
+name = "thrift"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b"
+dependencies = [
+ "byteorder",
+ "integer-encoding",
+ "log",
+ "ordered-float",
+ "threadpool",
+]
+
 [[package]]
 name = "time"
 version = "0.1.44"
@@ -8850,9 +11579,9 @@ dependencies = [
 
 [[package]]
 name = "time"
-version = "0.2.25"
+version = "0.2.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1195b046942c221454c2539395f85413b33383a067449d78aab2b7b052a142f7"
+checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242"
 dependencies = [
  "const_fn",
  "libc",
@@ -8875,9 +11604,9 @@ dependencies = [
 
 [[package]]
 name = "time-macros-impl"
-version = "0.1.1"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa"
+checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f"
 dependencies = [
  "proc-macro-hack",
  "proc-macro2",
@@ -8888,9 +11617,9 @@ dependencies = [
 
 [[package]]
 name = "tiny-bip39"
-version = "0.8.0"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8"
+checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d"
 dependencies = [
  "anyhow",
  "hmac 0.8.1",
@@ -8898,297 +11627,113 @@ dependencies = [
  "pbkdf2 0.4.0",
  "rand 0.7.3",
  "rustc-hash",
- "sha2 0.9.3",
+ "sha2 0.9.8",
  "thiserror",
- "unicode-normalization",
- "zeroize",
-]
-
-[[package]]
-name = "tiny-keccak"
-version = "2.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
-dependencies = [
- "crunchy",
-]
-
-[[package]]
-name = "tinyvec"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "317cca572a0e89c3ce0ca1f1bdc9369547fe318a683418e42ac8f59d14701023"
-dependencies = [
- "tinyvec_macros",
-]
-
-[[package]]
-name = "tinyvec_macros"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
-
-[[package]]
-name = "tokio"
-version = "0.1.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6"
-dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "mio",
- "num_cpus",
- "tokio-codec",
- "tokio-current-thread",
- "tokio-executor",
- "tokio-fs",
- "tokio-io",
- "tokio-reactor",
- "tokio-sync",
- "tokio-tcp",
- "tokio-threadpool",
- "tokio-timer",
- "tokio-udp",
- "tokio-uds",
-]
-
-[[package]]
-name = "tokio"
-version = "0.2.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092"
-dependencies = [
- "bytes 0.5.6",
- "fnv",
- "futures-core",
- "iovec",
- "lazy_static",
- "libc",
- "memchr",
- "mio",
- "mio-uds",
- "num_cpus",
- "pin-project-lite 0.1.11",
- "signal-hook-registry",
- "slab",
- "winapi 0.3.9",
-]
-
-[[package]]
-name = "tokio-buf"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46"
-dependencies = [
- "bytes 0.4.12",
- "either",
- "futures 0.1.31",
-]
-
-[[package]]
-name = "tokio-codec"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b"
-dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "tokio-io",
-]
-
-[[package]]
-name = "tokio-current-thread"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e"
-dependencies = [
- "futures 0.1.31",
- "tokio-executor",
-]
-
-[[package]]
-name = "tokio-executor"
-version = "0.1.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671"
-dependencies = [
- "crossbeam-utils 0.7.2",
- "futures 0.1.31",
-]
-
-[[package]]
-name = "tokio-fs"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4"
-dependencies = [
- "futures 0.1.31",
- "tokio-io",
- "tokio-threadpool",
-]
-
-[[package]]
-name = "tokio-io"
-version = "0.1.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674"
-dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "log",
-]
-
-[[package]]
-name = "tokio-named-pipes"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d282d483052288b2308ba5ee795f5673b159c9bdf63c385a05609da782a5eae"
-dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "mio",
- "mio-named-pipes",
- "tokio 0.1.22",
+ "unicode-normalization",
+ "wasm-bindgen",
+ "zeroize",
 ]
 
 [[package]]
-name = "tokio-reactor"
-version = "0.1.12"
+name = "tiny-keccak"
+version = "2.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351"
+checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
 dependencies = [
- "crossbeam-utils 0.7.2",
- "futures 0.1.31",
- "lazy_static",
- "log",
- "mio",
- "num_cpus",
- "parking_lot 0.9.0",
- "slab",
- "tokio-executor",
- "tokio-io",
- "tokio-sync",
+ "crunchy",
 ]
 
 [[package]]
-name = "tokio-rustls"
-version = "0.14.1"
+name = "tinyvec"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e12831b255bcfa39dc0436b01e19fea231a37db570686c06ee72c423479f889a"
+checksum = "5241dd6f21443a3606b432718b166d3cedc962fd4b8bea54a8bc7f514ebda986"
 dependencies = [
- "futures-core",
- "rustls 0.18.1",
- "tokio 0.2.25",
- "webpki 0.21.4",
+ "tinyvec_macros",
 ]
 
 [[package]]
-name = "tokio-service"
+name = "tinyvec_macros"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162"
-dependencies = [
- "futures 0.1.31",
-]
-
-[[package]]
-name = "tokio-sync"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee"
-dependencies = [
- "fnv",
- "futures 0.1.31",
-]
+checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
 
 [[package]]
-name = "tokio-tcp"
-version = "0.1.4"
+name = "tokio"
+version = "1.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72"
+checksum = "c2c2416fdedca8443ae44b4527de1ea633af61d8f7169ffa6e72c5b53d24efcc"
 dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "iovec",
- "mio",
- "tokio-io",
- "tokio-reactor",
+ "autocfg",
+ "bytes 1.1.0",
+ "libc",
+ "memchr",
+ "mio 0.7.13",
+ "num_cpus",
+ "once_cell",
+ "parking_lot 0.11.2",
+ "pin-project-lite 0.2.7",
+ "signal-hook-registry",
+ "tokio-macros",
+ "winapi 0.3.9",
 ]
 
 [[package]]
-name = "tokio-threadpool"
-version = "0.1.18"
+name = "tokio-macros"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89"
+checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110"
 dependencies = [
- "crossbeam-deque 0.7.3",
- "crossbeam-queue",
- "crossbeam-utils 0.7.2",
- "futures 0.1.31",
- "lazy_static",
- "log",
- "num_cpus",
- "slab",
- "tokio-executor",
+ "proc-macro2",
+ "quote",
+ "syn",
 ]
 
 [[package]]
-name = "tokio-timer"
-version = "0.2.13"
+name = "tokio-native-tls"
+version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296"
+checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b"
 dependencies = [
- "crossbeam-utils 0.7.2",
- "futures 0.1.31",
- "slab",
- "tokio-executor",
+ "native-tls",
+ "tokio",
 ]
 
 [[package]]
-name = "tokio-udp"
-version = "0.1.6"
+name = "tokio-rustls"
+version = "0.22.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82"
+checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6"
 dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "log",
- "mio",
- "tokio-codec",
- "tokio-io",
- "tokio-reactor",
+ "rustls",
+ "tokio",
+ "webpki",
 ]
 
 [[package]]
-name = "tokio-uds"
-version = "0.2.7"
+name = "tokio-stream"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0"
+checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f"
 dependencies = [
- "bytes 0.4.12",
- "futures 0.1.31",
- "iovec",
- "libc",
- "log",
- "mio",
- "mio-uds",
- "tokio-codec",
- "tokio-io",
- "tokio-reactor",
+ "futures-core",
+ "pin-project-lite 0.2.7",
+ "tokio",
 ]
 
 [[package]]
 name = "tokio-util"
-version = "0.3.1"
+version = "0.6.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"
+checksum = "08d3725d3efa29485e87311c5b699de63cde14b00ed4d256b8318aa30ca452cd"
 dependencies = [
- "bytes 0.5.6",
+ "bytes 1.1.0",
  "futures-core",
+ "futures-io",
  "futures-sink",
  "log",
- "pin-project-lite 0.1.11",
- "tokio 0.2.25",
+ "pin-project-lite 0.2.7",
+ "tokio",
 ]
 
 [[package]]
@@ -9208,22 +11753,22 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6"
 
 [[package]]
 name = "tracing"
-version = "0.1.25"
+version = "0.1.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f"
+checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105"
 dependencies = [
  "cfg-if 1.0.0",
  "log",
- "pin-project-lite 0.2.4",
+ "pin-project-lite 0.2.7",
  "tracing-attributes",
  "tracing-core",
 ]
 
 [[package]]
 name = "tracing-attributes"
-version = "0.1.13"
+version = "0.1.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07"
+checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -9232,9 +11777,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-core"
-version = "0.1.17"
+version = "0.1.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f"
+checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4"
 dependencies = [
  "lazy_static",
 ]
@@ -9245,7 +11790,7 @@ version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2"
 dependencies = [
- "pin-project 1.0.5",
+ "pin-project 1.0.8",
  "tracing",
 ]
 
@@ -9272,9 +11817,9 @@ dependencies = [
 
 [[package]]
 name = "tracing-subscriber"
-version = "0.2.18"
+version = "0.2.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa5553bf0883ba7c9cbe493b085c29926bd41b66afc31ff72cf17ff4fb60dcd5"
+checksum = "62af966210b88ad5776ee3ba12d5f35b8d6a2b2a12168f3080cf02b814d7376b"
 dependencies = [
  "ansi_term 0.12.1",
  "chrono",
@@ -9284,7 +11829,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sharded-slab",
- "smallvec 1.6.1",
+ "smallvec",
  "thread_local",
  "tracing",
  "tracing-core",
@@ -9294,15 +11839,15 @@ dependencies = [
 
 [[package]]
 name = "trie-db"
-version = "0.22.5"
+version = "0.22.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd81fe0c8bc2b528a51c9d2c31dae4483367a26a723a3c9a4a8120311d7774e3"
+checksum = "9eac131e334e81b6b3be07399482042838adcd7957aa0010231d0813e39e02fa"
 dependencies = [
  "hash-db",
  "hashbrown",
  "log",
  "rustc-hex",
- "smallvec 1.6.1",
+ "smallvec",
 ]
 
 [[package]]
@@ -9326,9 +11871,9 @@ dependencies = [
 
 [[package]]
 name = "trust-dns-proto"
-version = "0.20.1"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d57e219ba600dd96c2f6d82eb79645068e14edbc5c7e27514af40436b88150c"
+checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4"
 dependencies = [
  "async-trait",
  "cfg-if 1.0.0",
@@ -9337,22 +11882,22 @@ dependencies = [
  "futures-channel",
  "futures-io",
  "futures-util",
- "idna 0.2.2",
+ "idna 0.2.3",
  "ipnet",
  "lazy_static",
  "log",
- "rand 0.8.3",
- "smallvec 1.6.1",
+ "rand 0.8.4",
+ "smallvec",
  "thiserror",
  "tinyvec",
- "url 2.2.1",
+ "url 2.2.2",
 ]
 
 [[package]]
 name = "trust-dns-resolver"
-version = "0.20.1"
+version = "0.20.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0437eea3a6da51acc1e946545ff53d5b8fb2611ff1c3bed58522dde100536ae"
+checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770"
 dependencies = [
  "cfg-if 1.0.0",
  "futures-util",
@@ -9360,9 +11905,9 @@ dependencies = [
  "lazy_static",
  "log",
  "lru-cache",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "resolv-conf",
- "smallvec 1.6.1",
+ "smallvec",
  "thiserror",
  "trust-dns-proto",
 ]
@@ -9373,22 +11918,46 @@ version = "0.2.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
 
+[[package]]
+name = "try-runtime-cli"
+version = "0.10.0-dev"
+source = "git+https://github.com/paritytech/substrate?branch=master#d0f6c1c60da22e04dd25c2eca46ebfe6f1571af0"
+dependencies = [
+ "jsonrpsee-ws-client",
+ "log",
+ "parity-scale-codec",
+ "remote-externalities",
+ "sc-chain-spec",
+ "sc-cli",
+ "sc-executor",
+ "sc-service",
+ "serde",
+ "sp-core",
+ "sp-externalities",
+ "sp-io",
+ "sp-keystore",
+ "sp-runtime",
+ "sp-state-machine",
+ "sp-version",
+ "structopt",
+]
+
 [[package]]
 name = "twox-hash"
-version = "1.6.0"
+version = "1.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59"
+checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e"
 dependencies = [
  "cfg-if 0.1.10",
- "rand 0.7.3",
+ "rand 0.8.4",
  "static_assertions",
 ]
 
 [[package]]
 name = "typenum"
-version = "1.12.0"
+version = "1.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
+checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec"
 
 [[package]]
 name = "ucd-trie"
@@ -9398,9 +11967,9 @@ checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c"
 
 [[package]]
 name = "uint"
-version = "0.9.0"
+version = "0.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e"
+checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f"
 dependencies = [
  "byteorder",
  "crunchy",
@@ -9419,27 +11988,24 @@ dependencies = [
 
 [[package]]
 name = "unicode-bidi"
-version = "0.3.4"
+version = "0.3.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5"
-dependencies = [
- "matches",
-]
+checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085"
 
 [[package]]
 name = "unicode-normalization"
-version = "0.1.17"
+version = "0.1.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef"
+checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9"
 dependencies = [
  "tinyvec",
 ]
 
 [[package]]
 name = "unicode-segmentation"
-version = "1.7.1"
+version = "1.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796"
+checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b"
 
 [[package]]
 name = "unicode-width"
@@ -9449,18 +12015,18 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
 
 [[package]]
 name = "unicode-xid"
-version = "0.2.1"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
+checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
 
 [[package]]
 name = "universal-hash"
-version = "0.4.0"
+version = "0.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402"
+checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05"
 dependencies = [
  "generic-array 0.14.4",
- "subtle 2.4.0",
+ "subtle",
 ]
 
 [[package]]
@@ -9476,7 +12042,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2"
 dependencies = [
  "asynchronous-codec 0.5.0",
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "futures-io",
  "futures-util",
 ]
@@ -9488,7 +12054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f"
 dependencies = [
  "asynchronous-codec 0.6.0",
- "bytes 1.0.1",
+ "bytes 1.1.0",
  "futures-io",
  "futures-util",
 ]
@@ -9512,36 +12078,31 @@ dependencies = [
 
 [[package]]
 name = "url"
-version = "2.2.1"
+version = "2.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b"
+checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c"
 dependencies = [
  "form_urlencoded",
- "idna 0.2.2",
+ "idna 0.2.3",
  "matches",
  "percent-encoding 2.1.0",
 ]
 
 [[package]]
 name = "value-bag"
-version = "1.0.0-alpha.6"
+version = "1.0.0-alpha.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1"
+checksum = "dd320e1520f94261153e96f7534476ad869c14022aee1e59af7c778075d840ae"
 dependencies = [
  "ctor",
+ "version_check",
 ]
 
 [[package]]
 name = "vcpkg"
-version = "0.2.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb"
-
-[[package]]
-name = "vec-arena"
-version = "1.0.0"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d"
+checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
 
 [[package]]
 name = "vec_map"
@@ -9551,9 +12112,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
 
 [[package]]
 name = "version_check"
-version = "0.9.2"
+version = "0.9.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
+checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe"
 
 [[package]]
 name = "void"
@@ -9578,17 +12139,6 @@ dependencies = [
  "winapi-util",
 ]
 
-[[package]]
-name = "want"
-version = "0.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230"
-dependencies = [
- "futures 0.1.31",
- "log",
- "try-lock",
-]
-
 [[package]]
 name = "want"
 version = "0.3.0"
@@ -9613,19 +12163,21 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.73"
+version = "0.2.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9"
+checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c"
 dependencies = [
  "cfg-if 1.0.0",
+ "serde",
+ "serde_json",
  "wasm-bindgen-macro",
 ]
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.73"
+version = "0.2.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae"
+checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523"
 dependencies = [
  "bumpalo",
  "lazy_static",
@@ -9638,9 +12190,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-futures"
-version = "0.4.20"
+version = "0.4.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3de431a2910c86679c34283a33f66f4e4abd7e0aec27b6669060148872aadf94"
+checksum = "a87d738d4abc4cf22f6eb142f5b9a81301331ee3c767f2fef2fda4e325492060"
 dependencies = [
  "cfg-if 1.0.0",
  "js-sys",
@@ -9650,9 +12202,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.73"
+version = "0.2.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f"
+checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -9660,9 +12212,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.73"
+version = "0.2.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c"
+checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -9673,9 +12225,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.73"
+version = "0.2.77"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489"
+checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4"
 
 [[package]]
 name = "wasm-gc-api"
@@ -9694,9 +12246,9 @@ version = "0.2.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "js-sys",
- "parking_lot 0.11.1",
+ "parking_lot 0.11.2",
  "pin-utils",
  "wasm-bindgen",
  "wasm-bindgen-futures",
@@ -9712,7 +12264,7 @@ dependencies = [
  "downcast-rs",
  "libc",
  "memory_units",
- "num-rational",
+ "num-rational 0.2.4",
  "num-traits",
  "parity-wasm 0.42.2",
  "wasmi-validation",
@@ -9729,15 +12281,15 @@ dependencies = [
 
 [[package]]
 name = "wasmparser"
-version = "0.78.2"
+version = "0.79.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65"
+checksum = "5b5894be15a559c85779254700e1d35f02f843b5a69152e5c82c626d9fd66c0e"
 
 [[package]]
 name = "wasmtime"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b310b9d20fcf59385761d1ade7a3ef06aecc380e3d3172035b919eaf7465d9f7"
+checksum = "8bbb8a082a8ef50f7eeb8b82dda9709ef1e68963ea3c94e45581644dd4041835"
 dependencies = [
  "anyhow",
  "backtrace",
@@ -9748,29 +12300,27 @@ dependencies = [
  "lazy_static",
  "libc",
  "log",
- "paste 1.0.4",
+ "paste",
  "psm",
  "region",
  "rustc-demangle",
  "serde",
- "smallvec 1.6.1",
+ "smallvec",
  "target-lexicon",
  "wasmparser",
  "wasmtime-cache",
  "wasmtime-environ",
- "wasmtime-fiber",
  "wasmtime-jit",
  "wasmtime-profiling",
  "wasmtime-runtime",
- "wat",
  "winapi 0.3.9",
 ]
 
 [[package]]
 name = "wasmtime-cache"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843"
+checksum = "d73391579ca7f24573138ef768b73b2aed5f9d542385c64979b65d60d0912399"
 dependencies = [
  "anyhow",
  "base64 0.13.0",
@@ -9781,7 +12331,7 @@ dependencies = [
  "libc",
  "log",
  "serde",
- "sha2 0.9.3",
+ "sha2 0.9.8",
  "toml",
  "winapi 0.3.9",
  "zstd",
@@ -9789,9 +12339,9 @@ dependencies = [
 
 [[package]]
 name = "wasmtime-cranelift"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c525b39f062eada7db3c1298287b96dcb6e472b9f6b22501300b28d9fa7582f6"
+checksum = "81c6f5ae9205382345c7cd7454932a906186836999a2161c385e38a15f52e1fe"
 dependencies = [
  "cranelift-codegen",
  "cranelift-entity",
@@ -9804,14 +12354,14 @@ dependencies = [
 
 [[package]]
 name = "wasmtime-debug"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5d2a763e7a6fc734218e0e463196762a4f409c483063d81e0e85f96343b2e0a"
+checksum = "c69e08f55e12f15f50b1b533bc3626723e7224254a065de6576934c86258c9e8"
 dependencies = [
  "anyhow",
- "gimli 0.24.0",
+ "gimli",
  "more-asserts",
- "object 0.24.0",
+ "object",
  "target-lexicon",
  "thiserror",
  "wasmparser",
@@ -9820,15 +12370,15 @@ dependencies = [
 
 [[package]]
 name = "wasmtime-environ"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f64d0c2d881c31b0d65c1f2695e022d71eb60b9fbdd336aacca28208b58eac90"
+checksum = "005d93174040af37fb8625f891cd9827afdad314261f7ec4ee61ec497d6e9d3c"
 dependencies = [
  "cfg-if 1.0.0",
  "cranelift-codegen",
  "cranelift-entity",
  "cranelift-wasm",
- "gimli 0.24.0",
+ "gimli",
  "indexmap",
  "log",
  "more-asserts",
@@ -9837,24 +12387,13 @@ dependencies = [
  "wasmparser",
 ]
 
-[[package]]
-name = "wasmtime-fiber"
-version = "0.27.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a089d44cd7e2465d41a53b840a5b4fca1bf6d1ecfebc970eac9592b34ea5f0b3"
-dependencies = [
- "cc",
- "libc",
- "winapi 0.3.9",
-]
-
 [[package]]
 name = "wasmtime-jit"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4d4539ea734422b7c868107e2187d7746d8affbcaa71916d72639f53757ad707"
+checksum = "d0bf1dfb213a35d8f21aefae40e597fe72778a907011ffdff7affb029a02af9a"
 dependencies = [
- "addr2line 0.15.2",
+ "addr2line",
  "anyhow",
  "cfg-if 1.0.0",
  "cranelift-codegen",
@@ -9862,10 +12401,10 @@ dependencies = [
  "cranelift-frontend",
  "cranelift-native",
  "cranelift-wasm",
- "gimli 0.24.0",
+ "gimli",
  "log",
  "more-asserts",
- "object 0.24.0",
+ "object",
  "rayon",
  "region",
  "serde",
@@ -9883,13 +12422,13 @@ dependencies = [
 
 [[package]]
 name = "wasmtime-obj"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e1a8ff85246d091828e2225af521a6208ed28c997bb5c39eb697366dc2e2f2b"
+checksum = "d231491878e710c68015228c9f9fc5955fe5c96dbf1485c15f7bed55b622c83c"
 dependencies = [
  "anyhow",
  "more-asserts",
- "object 0.24.0",
+ "object",
  "target-lexicon",
  "wasmtime-debug",
  "wasmtime-environ",
@@ -9897,16 +12436,16 @@ dependencies = [
 
 [[package]]
 name = "wasmtime-profiling"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e24364d522dcd67c897c8fffc42e5bdfc57207bbb6d7eeade0da9d4a7d70105b"
+checksum = "21486cfb5255c2069666c1f116f9e949d4e35c9a494f11112fa407879e42198d"
 dependencies = [
  "anyhow",
  "cfg-if 1.0.0",
- "gimli 0.24.0",
+ "gimli",
  "lazy_static",
  "libc",
- "object 0.24.0",
+ "object",
  "scroll",
  "serde",
  "target-lexicon",
@@ -9916,9 +12455,9 @@ dependencies = [
 
 [[package]]
 name = "wasmtime-runtime"
-version = "0.27.0"
+version = "0.29.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c51e57976e8a19a18a18e002c6eb12e5769554204238e47ff155fda1809ef0f7"
+checksum = "d7ddfdf32e0a20d81f48be9dacd31612bc61de5a174d1356fef806d300f507de"
 dependencies = [
  "anyhow",
  "backtrace",
@@ -9929,39 +12468,20 @@ dependencies = [
  "libc",
  "log",
  "mach",
- "memoffset 0.6.1",
+ "memoffset",
  "more-asserts",
- "rand 0.8.3",
+ "rand 0.8.4",
  "region",
  "thiserror",
  "wasmtime-environ",
- "wasmtime-fiber",
  "winapi 0.3.9",
 ]
 
-[[package]]
-name = "wast"
-version = "35.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1a5800e9f86a1eae935e38bea11e60fd253f6d514d153fb39b3e5535a7b37b56"
-dependencies = [
- "leb128",
-]
-
-[[package]]
-name = "wat"
-version = "1.0.37"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ec280a739b69173e0ffd12c1658507996836ba4e992ed9bc1e5385a0bd72a02"
-dependencies = [
- "wast",
-]
-
 [[package]]
 name = "web-sys"
-version = "0.3.47"
+version = "0.3.54"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3"
+checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a"
 dependencies = [
  "js-sys",
  "wasm-bindgen",
@@ -9969,41 +12489,54 @@ dependencies = [
 
 [[package]]
 name = "web3"
-version = "0.15.0"
-source = "git+https://github.com/tomusdrw/rust-web3.git?branch=td-ethabi#68dabc289bf9f5e59447d822c5da5b4c768175c6"
+version = "0.16.0"
+source = "git+https://github.com/svyatonik/rust-web3.git?branch=bump-deps#117badfea7d6dbd748671648e877d6499e20f6ae"
 dependencies = [
  "arrayvec 0.5.2",
+ "base64 0.13.0",
+ "bytes 1.1.0",
  "derive_more",
  "ethabi",
  "ethereum-types",
- "futures 0.3.13",
+ "futures 0.3.17",
  "futures-timer 3.0.2",
+ "headers",
  "hex",
- "jsonrpc-core 17.0.0",
+ "jsonrpc-core 17.1.0",
  "log",
- "parking_lot 0.11.1",
- "pin-project 1.0.5",
+ "parking_lot 0.11.2",
+ "pin-project 1.0.8",
+ "reqwest",
  "rlp",
+ "secp256k1",
  "serde",
  "serde_json",
+ "soketto 0.5.0",
  "tiny-keccak",
+ "tokio",
+ "tokio-stream",
+ "tokio-util",
+ "url 2.2.2",
+ "web3-async-native-tls",
 ]
 
 [[package]]
-name = "webpki"
-version = "0.21.4"
+name = "web3-async-native-tls"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea"
+checksum = "1f6d8d1636b2627fe63518d5a9b38a569405d9c9bc665c43c9c341de57227ebb"
 dependencies = [
- "ring",
- "untrusted",
+ "native-tls",
+ "thiserror",
+ "tokio",
+ "url 2.2.2",
 ]
 
 [[package]]
 name = "webpki"
-version = "0.22.0"
+version = "0.21.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd"
+checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea"
 dependencies = [
  "ring",
  "untrusted",
@@ -10011,39 +12544,31 @@ dependencies = [
 
 [[package]]
 name = "webpki-roots"
-version = "0.21.0"
+version = "0.21.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376"
+checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940"
 dependencies = [
- "webpki 0.21.4",
+ "webpki",
 ]
 
 [[package]]
-name = "wepoll-sys"
-version = "3.0.1"
+name = "wepoll-ffi"
+version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff"
+checksum = "d743fdedc5c64377b5fc2bc036b01c7fd642205a0d96356034ae3404d49eb7fb"
 dependencies = [
  "cc",
 ]
 
 [[package]]
 name = "which"
-version = "3.1.1"
+version = "4.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "which"
-version = "4.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87c14ef7e1b8b8ecfc75d5eca37949410046e66f15d185c01d70824f1f8111ef"
+checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9"
 dependencies = [
+ "either",
+ "lazy_static",
  "libc",
- "thiserror",
 ]
 
 [[package]]
@@ -10104,6 +12629,15 @@ dependencies = [
  "winapi 0.3.9",
 ]
 
+[[package]]
+name = "winreg"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69"
+dependencies = [
+ "winapi 0.3.9",
+]
+
 [[package]]
 name = "ws2_32-sys"
 version = "0.2.1"
@@ -10122,15 +12656,75 @@ checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214"
 
 [[package]]
 name = "x25519-dalek"
-version = "1.1.0"
+version = "1.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088"
+checksum = "5a0c105152107e3b96f6a00a65e86ce82d9b125230e1c4302940eca58ff71f4f"
 dependencies = [
- "curve25519-dalek 3.0.2",
+ "curve25519-dalek 3.2.0",
  "rand_core 0.5.1",
  "zeroize",
 ]
 
+[[package]]
+name = "xcm"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "derivative",
+ "impl-trait-for-tuples",
+ "log",
+ "parity-scale-codec",
+ "scale-info",
+ "xcm-procedural",
+]
+
+[[package]]
+name = "xcm-builder"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "frame-support",
+ "frame-system",
+ "log",
+ "pallet-transaction-payment",
+ "parity-scale-codec",
+ "polkadot-parachain",
+ "scale-info",
+ "sp-arithmetic",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+ "xcm",
+ "xcm-executor",
+]
+
+[[package]]
+name = "xcm-executor"
+version = "0.9.11"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "frame-support",
+ "impl-trait-for-tuples",
+ "log",
+ "parity-scale-codec",
+ "sp-arithmetic",
+ "sp-core",
+ "sp-io",
+ "sp-runtime",
+ "sp-std",
+ "xcm",
+]
+
+[[package]]
+name = "xcm-procedural"
+version = "0.1.0"
+source = "git+https://github.com/paritytech/polkadot?branch=master#dd4b2e6a34a08a01b876d14641e99e7011be3463"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "yaml-rust"
 version = "0.3.5"
@@ -10143,28 +12737,28 @@ version = "0.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107"
 dependencies = [
- "futures 0.3.13",
+ "futures 0.3.17",
  "log",
  "nohash-hasher",
- "parking_lot 0.11.1",
- "rand 0.8.3",
+ "parking_lot 0.11.2",
+ "rand 0.8.4",
  "static_assertions",
 ]
 
 [[package]]
 name = "zeroize"
-version = "1.2.0"
+version = "1.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36"
+checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970"
 dependencies = [
  "zeroize_derive",
 ]
 
 [[package]]
 name = "zeroize_derive"
-version = "1.0.1"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16"
+checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -10174,18 +12768,18 @@ dependencies = [
 
 [[package]]
 name = "zstd"
-version = "0.6.1+zstd.1.4.9"
+version = "0.9.0+zstd.1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3"
+checksum = "07749a5dc2cb6b36661290245e350f15ec3bbb304e493db54a1d354480522ccd"
 dependencies = [
  "zstd-safe",
 ]
 
 [[package]]
 name = "zstd-safe"
-version = "3.0.1+zstd.1.4.9"
+version = "4.1.1+zstd.1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c"
+checksum = "c91c90f2c593b003603e5e0493c837088df4469da25aafff8bce42ba48caf079"
 dependencies = [
  "libc",
  "zstd-sys",
@@ -10193,9 +12787,9 @@ dependencies = [
 
 [[package]]
 name = "zstd-sys"
-version = "1.4.20+zstd.1.4.9"
+version = "1.6.1+zstd.1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e"
+checksum = "615120c7a2431d16cf1cf979e7fc31ba7a5b5e5707b29c8a99e5dbf8a8392a33"
 dependencies = [
  "cc",
  "libc",
diff --git a/polkadot/bridges/Cargo.toml b/polkadot/bridges/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..1090a0fe5ba8d339f60c53f1e1eb734064be26ad
--- /dev/null
+++ b/polkadot/bridges/Cargo.toml
@@ -0,0 +1,11 @@
+[workspace]
+resolver = "2"
+
+members = [
+	"bin/*/node",
+	"bin/*/runtime",
+	"fuzz/*",
+	"modules/*",
+	"primitives/*",
+	"relays/*",
+]
diff --git a/polkadot/bridges/README.md b/polkadot/bridges/README.md
index b407f203b7427eb5923e770747c5c0b48cadeb82..41d7fec13d5b9337dbf5999a826a9a9ec1af84da 100644
--- a/polkadot/bridges/README.md
+++ b/polkadot/bridges/README.md
@@ -38,6 +38,25 @@ cargo build --all
 cargo test --all
 ```
 
+Also you can build the repo with 
+[Parity CI Docker image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci):
+
+```bash
+docker pull paritytech/bridges-ci:production
+mkdir ~/cache
+chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000
+docker run --rm -it -w /shellhere/parity-bridges-common \
+                    -v /home/$(whoami)/cache/:/cache/    \
+                    -v "$(pwd)":/shellhere/parity-bridges-common \
+                    -e CARGO_HOME=/cache/cargo/ \
+                    -e SCCACHE_DIR=/cache/sccache/ \
+                    -e CARGO_TARGET_DIR=/cache/target/  paritytech/bridges-ci:production cargo build --all
+#artifacts can be found in ~/cache/target
+```
+
+If you want to reproduce other steps of CI process you can use the following 
+[guide](https://github.com/paritytech/scripts#reproduce-ci-locally).
+
 If you need more information about setting up your development environment Substrate's
 [Getting Started](https://substrate.dev/docs/en/knowledgebase/getting-started/) page is a good
 resource.
@@ -102,10 +121,9 @@ the `relays` which are used to pass messages between chains.
 To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes
 on each side of the bridge (source and target chain).
 
-There are 3 ways to run the bridge, described below:
+There are 2 ways to run the bridge, described below:
 
-- building & running from source,
-- building or using Docker images for each individual component,
+- building & running from source
 - running a Docker Compose setup (recommended).
 
 ### Using the Source
@@ -119,88 +137,102 @@ cargo build -p millau-bridge-node
 cargo build -p substrate-relay
 ```
 
-### Running
+### Running a Dev network
 
-To run a simple dev network you'll can use the scripts located in
-[the `deployments/local-scripts` folder](./deployments/local-scripts). Since the relayer connects to
-both Substrate chains it must be run last.
+We will launch a dev network to demonstrate how to relay a message between two Substrate based
+chains (named Rialto and Millau).
+
+To do this we will need two nodes, two relayers which will relay headers, and two relayers which
+will relay messages.
+
+#### Running from local scripts
+
+To run a simple dev network you can use the scripts located in the
+[`deployments/local-scripts` folder](./deployments/local-scripts).
+
+First, we must run the two Substrate nodes.
 
 ```bash
 # In `parity-bridges-common` folder
 ./deployments/local-scripts/run-rialto-node.sh
 ./deployments/local-scripts/run-millau-node.sh
+```
+
+After the nodes are up we can run the header relayers.
+
+```bash
 ./deployments/local-scripts/relay-millau-to-rialto.sh
+./deployments/local-scripts/relay-rialto-to-millau.sh
 ```
 
 At this point you should see the relayer submitting headers from the Millau Substrate chain to the
 Rialto Substrate chain.
 
-### Local Docker Setup
-
-To get up and running quickly you can use published Docker images for the bridge nodes and relayer.
-The images are published on [Docker Hub](https://hub.docker.com/u/paritytech).
+```
+# Header Relayer Logs
+[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto
+[...] [date] INFO bridge Synced 147 of 147 headers
+[...] [date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto
+[...] [date] INFO bridge Synced 148 of 149 headers
+```
 
-To run the dev network we first run the two bridge nodes:
+Finally, we can run the message relayers.
 
 ```bash
-docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \
-           -it paritytech/rialto-bridge-node --dev --tmp \
-           --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external
-
-docker run -p 30334:30333 -p 9934:9933 -p 9945:9944 \
-           -it paritytech/millau-bridge-node --dev --tmp \
-           --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external
+./deployments/local-scripts/relay-messages-millau-to-rialto.sh
+./deployments/local-scripts/relay-messages-rialto-to-millau.sh
 ```
 
-Notice that the `docker run` command will accept all the normal Substrate flags. For local
-development you should at minimum run with the `--dev` flag or else no blocks will be produced.
+You will also see the message lane relayers listening for new messages.
 
-Then we need to initialize and run the relayer:
+```
+# Message Relayer Logs
+[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces
+[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race
+[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces
+[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { latest_nonce: 0, nonces_data: () }
+[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces
+[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { latest_nonce: 0, nonces_data: () }
+[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) }
+[...] [date] DEBUG bridge Asking Millau node about its state
+[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) }
+```
 
-```bash
-docker run --network=host -it \
-        paritytech/substrate-relay init-bridge RialtoToMillau \
-        --target-host localhost \
-        --target-port 9945 \
-        --source-host localhost \
-        --source-port 9944 \
-        --target-signer //Alice
+To send a message see the ["How to send a message" section](#how-to-send-a-message).
 
-docker run --network=host -it \
-        paritytech/substrate-relay relay-headers RialtoToMillau \
-        --target-host localhost \
-        --target-port 9945 \
-        --source-host localhost \
-        --source-port 9944 \
-        --target-signer //Bob \
-```
+### Full Network Docker Compose Setup
+
+For a more sophisticated deployment which includes bidirectional header sync, message passing,
+monitoring dashboards, etc. see the [Deployments README](./deployments/README.md).
 
-You should now see the relayer submitting headers from the Millau chain to the Rialto chain.
+You should note that you can find images for all the bridge components published on
+[Docker Hub](https://hub.docker.com/u/paritytech).
 
-If you don't want to use the published Docker images you can build images yourself. You can do this
-by running the following commands at the top level of the repository.
+To run a Rialto node for example, you can use the following command:
 
 ```bash
-# In `parity-bridges-common` folder
-docker build . -t local/rialto-bridge-node --build-arg PROJECT=rialto-bridge-node
-docker build . -t local/millau-bridge-node --build-arg PROJECT=millau-bridge-node
-docker build . -t local/substrate-relay --build-arg PROJECT=substrate-relay
+docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \
+  -it paritytech/rialto-bridge-node --dev --tmp \
+  --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external
 ```
 
-_Note: Building the node images will take a long time, so make sure you have some coffee handy._
+### How to send a message
 
-Once you have the images built you can use them in the previous commands by replacing
-`paritytech/<component_name>` with `local/<component_name>` everywhere.
+In this section we'll show you how to quickly send a bridge message, if you want to
+interact with and test the bridge see more details in [send message](./docs/send-message.md)
 
-### Full Network Docker Compose Setup
-
-For a more sophisticated deployment which includes bidirectional header sync, message passing,
-monitoring dashboards, etc. see the [Deployments README](./deployments/README.md).
+```bash
+# In `parity-bridges-common` folder
+./scripts/send-message-from-millau-rialto.sh remark
+```
 
-### How to send a message
+After sending a message you will see the following logs showing a message was successfully sent:
 
-A straightforward way to interact with and test the bridge is sending messages. This is explained
-in the [send message](./docs/send-message.md) document.
+```
+INFO bridge Sending message to Rialto. Size: 286. Dispatch weight: 1038000. Fee: 275,002,568
+INFO bridge Signed Millau Call: 0x7904...
+TRACE bridge Sent transaction to Millau node: 0x5e68...
+```
 
 ## Community
 
diff --git a/polkadot/bridges/bin/millau/node/Cargo.toml b/polkadot/bridges/bin/millau/node/Cargo.toml
index 8c6d32402ac24203b4275717cfa50d885e401258..b650bd478a62b350fcffcda7038e158ddbae9fda 100644
--- a/polkadot/bridges/bin/millau/node/Cargo.toml
+++ b/polkadot/bridges/bin/millau/node/Cargo.toml
@@ -10,14 +10,13 @@ repository = "https://github.com/paritytech/parity-bridges-common/"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-jsonrpc-core = "15.1.0"
+jsonrpc-core = "18.0"
 structopt = "0.3.21"
 serde_json = "1.0.59"
 
 # Bridge dependencies
 
-bp-messages = { path = "../../../primitives/messages" }
-bp-millau= { path = "../../../primitives/chain-millau" }
+bp-millau = { path = "../../../primitives/chain-millau" }
 bp-runtime = { path = "../../../primitives/runtime" }
 millau-runtime = { path = "../runtime" }
 pallet-bridge-messages = { path = "../../../modules/messages" }
@@ -45,7 +44,6 @@ sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "mast
 sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" }
 substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -56,9 +54,6 @@ frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", bran
 
 [features]
 default = []
-
-# TODO: https://github.com/paritytech/parity-bridges-common/issues/390
-# I've left the feature flag here to test our CI configuration
 runtime-benchmarks = [
-	# "millau-runtime/runtime-benchmarks",
+	"millau-runtime/runtime-benchmarks",
 ]
diff --git a/polkadot/bridges/bin/millau/node/src/chain_spec.rs b/polkadot/bridges/bin/millau/node/src/chain_spec.rs
index 2c50897b965edfbc73a0550cfdb8ebaeebc11515..c32291fb385814cfc0cf7eaa63c73e46b58de442 100644
--- a/polkadot/bridges/bin/millau/node/src/chain_spec.rs
+++ b/polkadot/bridges/bin/millau/node/src/chain_spec.rs
@@ -16,8 +16,9 @@
 
 use bp_millau::derive_account_from_rialto_id;
 use millau_runtime::{
-	AccountId, AuraConfig, BalancesConfig, BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig,
-	SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY,
+	AccountId, AuraConfig, BalancesConfig, BridgeRialtoMessagesConfig, BridgeWestendGrandpaConfig,
+	GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig,
+	WASM_BINARY,
 };
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 use sp_core::{sr25519, Pair, Public};
@@ -70,10 +71,7 @@ impl Alternative {
 		let properties = Some(
 			serde_json::json!({
 				"tokenDecimals": 9,
-				"tokenSymbol": "MLAU",
-				"bridgeIds": {
-					"Rialto": bp_runtime::RIALTO_CHAIN_ID,
-				}
+				"tokenSymbol": "MLAU"
 			})
 			.as_object()
 			.expect("Map given; qed")
@@ -81,8 +79,8 @@ impl Alternative {
 		);
 		match self {
 			Alternative::Development => ChainSpec::from_genesis(
-				"Development",
-				"dev",
+				"Millau Development",
+				"millau_dev",
 				sc_service::ChainType::Development,
 				|| {
 					testnet_genesis(
@@ -107,8 +105,8 @@ impl Alternative {
 				None,
 			),
 			Alternative::LocalTestnet => ChainSpec::from_genesis(
-				"Local Testnet",
-				"local_testnet",
+				"Millau Local",
+				"millau_local",
 				sc_service::ChainType::Local,
 				|| {
 					testnet_genesis(
@@ -137,10 +135,12 @@ impl Alternative {
 							get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
 							get_account_id_from_seed::<sr25519::Public>("George//stash"),
 							get_account_id_from_seed::<sr25519::Public>("Harry//stash"),
-							pallet_bridge_messages::Pallet::<
-								millau_runtime::Runtime,
-								pallet_bridge_messages::DefaultInstance,
-							>::relayer_fund_account_id(),
+							get_account_id_from_seed::<sr25519::Public>("RialtoMessagesOwner"),
+							get_account_id_from_seed::<sr25519::Public>("WithRialtoTokenSwap"),
+							pallet_bridge_messages::relayer_fund_account_id::<
+								bp_millau::AccountId,
+								bp_millau::AccountIdConverter,
+							>(),
 							derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(
 								get_account_id_from_seed::<sr25519::Public>("Alice"),
 							)),
@@ -191,12 +191,8 @@ fn testnet_genesis(
 		balances: BalancesConfig {
 			balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
 		},
-		aura: AuraConfig {
-			authorities: Vec::new(),
-		},
-		grandpa: GrandpaConfig {
-			authorities: Vec::new(),
-		},
+		aura: AuraConfig { authorities: Vec::new() },
+		grandpa: GrandpaConfig { authorities: Vec::new() },
 		sudo: SudoConfig { key: root_key },
 		session: SessionConfig {
 			keys: initial_authorities
@@ -211,15 +207,17 @@ fn testnet_genesis(
 			owner: Some(get_account_id_from_seed::<sr25519::Public>("George")),
 			..Default::default()
 		},
+		bridge_rialto_messages: BridgeRialtoMessagesConfig {
+			owner: Some(get_account_id_from_seed::<sr25519::Public>("RialtoMessagesOwner")),
+			..Default::default()
+		},
 	}
 }
 
 #[test]
 fn derived_dave_account_is_as_expected() {
 	let dave = get_account_id_from_seed::<sr25519::Public>("Dave");
-	let derived: AccountId = derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave));
-	assert_eq!(
-		derived.to_string(),
-		"5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string()
-	);
+	let derived: AccountId =
+		derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave));
+	assert_eq!(derived.to_string(), "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string());
 }
diff --git a/polkadot/bridges/bin/millau/node/src/cli.rs b/polkadot/bridges/bin/millau/node/src/cli.rs
index 46323ed25c9ed2e39ebc6089b5bfa0e2ad29ddfd..086def633c59866d067ee11e891e93490e06e0d9 100644
--- a/polkadot/bridges/bin/millau/node/src/cli.rs
+++ b/polkadot/bridges/bin/millau/node/src/cli.rs
@@ -29,10 +29,10 @@ pub struct Cli {
 /// Possible subcommands of the main binary.
 #[derive(Debug, StructOpt)]
 pub enum Subcommand {
-	/// Key management cli utilities
+	/// Key management CLI utilities
 	Key(sc_cli::KeySubcommand),
 
-	/// Verify a signature for a message, provided on STDIN, with a given (public or secret) key.
+	/// Verify a signature for a message, provided on `STDIN`, with a given (public or secret) key.
 	Verify(sc_cli::VerifyCmd),
 
 	/// Generate a seed that provides a vanity address.
diff --git a/polkadot/bridges/bin/millau/node/src/command.rs b/polkadot/bridges/bin/millau/node/src/command.rs
index d73f9b1ac9b2cc284550f4f07fa79a0c83fcdb5f..4285ecaced5161eda614a03e22c6e3ebc6f478d8 100644
--- a/polkadot/bridges/bin/millau/node/src/command.rs
+++ b/polkadot/bridges/bin/millau/node/src/command.rs
@@ -14,9 +14,11 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{Cli, Subcommand};
-use crate::service;
-use crate::service::new_partial;
+use crate::{
+	cli::{Cli, Subcommand},
+	service,
+	service::new_partial,
+};
 use millau_runtime::{Block, RuntimeApi};
 use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
 use sc_service::PartialComponents;
@@ -75,19 +77,18 @@ pub fn run() -> sc_cli::Result<()> {
 	));
 
 	match &cli.subcommand {
-		Some(Subcommand::Benchmark(cmd)) => {
+		Some(Subcommand::Benchmark(cmd)) =>
 			if cfg!(feature = "runtime-benchmarks") {
 				let runner = cli.create_runner(cmd)?;
 
-				runner.sync_run(|config| cmd.run::<Block, service::Executor>(config))
+				runner.sync_run(|config| cmd.run::<Block, service::ExecutorDispatch>(config))
 			} else {
 				println!(
 					"Benchmarking wasn't enabled when building the node. \
 				You can enable it with `--features runtime-benchmarks`."
 				);
 				Ok(())
-			}
-		}
+			},
 		Some(Subcommand::Key(cmd)) => cmd.run(&cli),
 		Some(Subcommand::Sign(cmd)) => cmd.run(),
 		Some(Subcommand::Verify(cmd)) => cmd.run(),
@@ -95,69 +96,53 @@ pub fn run() -> sc_cli::Result<()> {
 		Some(Subcommand::BuildSpec(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
-		}
+		},
 		Some(Subcommand::CheckBlock(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&config)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = new_partial(&config)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, .. } = new_partial(&config)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&config)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::PurgeChain(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.database))
-		}
+		},
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					backend,
-					..
-				} = new_partial(&config)?;
+				let PartialComponents { client, task_manager, backend, .. } = new_partial(&config)?;
 				Ok((cmd.run(client, backend), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::Inspect(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.sync_run(|config| cmd.run::<Block, RuntimeApi, service::Executor>(config))
-		}
+			runner
+				.sync_run(|config| cmd.run::<Block, RuntimeApi, service::ExecutorDispatch>(config))
+		},
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
 			runner.run_node_until_exit(|config| async move {
@@ -167,6 +152,6 @@ pub fn run() -> sc_cli::Result<()> {
 				}
 				.map_err(sc_cli::Error::Service)
 			})
-		}
+		},
 	}
 }
diff --git a/polkadot/bridges/bin/millau/node/src/service.rs b/polkadot/bridges/bin/millau/node/src/service.rs
index 599fba1c374fb13ed72e864fb41a90aee387fb40..b8d42f9c7ed34e5efe19ec36a6873393ef02b230 100644
--- a/polkadot/bridges/bin/millau/node/src/service.rs
+++ b/polkadot/bridges/bin/millau/node/src/service.rs
@@ -21,8 +21,8 @@
 // =====================================================================================
 // UPDATE GUIDE:
 // 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo);
-// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs;
-// 3) fix compilation errors;
+// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom
+// RPCs; 3) fix compilation errors;
 // 4) test :)
 // =====================================================================================
 // =====================================================================================
@@ -40,6 +40,8 @@ use sp_consensus::SlotData;
 use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
 use std::{sync::Arc, time::Duration};
 
+type Executor = NativeElseWasmExecutor<ExecutorDispatch>;
+
 // Our native executor instance.
 pub struct ExecutorDispatch;
 
@@ -55,7 +57,8 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
 	}
 }
 
-type FullClient = sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
+type FullClient =
+	sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
 type FullBackend = sc_service::TFullBackend<Block>;
 type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
 
@@ -70,7 +73,12 @@ pub fn new_partial(
 		sc_consensus::DefaultImportQueue<Block, FullClient>,
 		sc_transaction_pool::FullPool<Block, FullClient>,
 		(
-			sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
+			sc_finality_grandpa::GrandpaBlockImport<
+				FullBackend,
+				Block,
+				FullClient,
+				FullSelectChain,
+			>,
 			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
 			Option<Telemetry>,
 		),
@@ -78,7 +86,7 @@ pub fn new_partial(
 	ServiceError,
 > {
 	if config.keystore_remote.is_some() {
-		return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()));
+		return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()))
 	}
 
 	let telemetry = config
@@ -92,10 +100,18 @@ pub fn new_partial(
 		})
 		.transpose()?;
 
-	let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
-		config,
-		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-	)?;
+	let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new(
+		config.wasm_method,
+		config.default_heap_pages,
+		config.max_runtime_instances,
+	);
+
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
 	let client = Arc::new(client);
 
 	let telemetry = telemetry.map(|(worker, telemetry)| {
@@ -122,26 +138,30 @@ pub fn new_partial(
 
 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
 
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
-		block_import: grandpa_block_import.clone(),
-		justification_import: Some(Box::new(grandpa_block_import.clone())),
-		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
+			block_import: grandpa_block_import.clone(),
+			justification_import: Some(Box::new(grandpa_block_import.clone())),
+			client: client.clone(),
+			create_inherent_data_providers: move |_, ()| async move {
+				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
-			);
-
-			Ok((timestamp, slot))
-		},
-		spawner: &task_manager.spawn_essential_handle(),
-		can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
-		registry: config.prometheus_registry(),
-		check_for_equivocation: Default::default(),
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-	})?;
+				let slot =
+					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+						*timestamp,
+						slot_duration,
+					);
+
+				Ok((timestamp, slot))
+			},
+			spawner: &task_manager.spawn_essential_handle(),
+			can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(
+				client.executor().clone(),
+			),
+			registry: config.prometheus_registry(),
+			check_for_equivocation: Default::default(),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+		})?;
 
 	Ok(sc_service::PartialComponents {
 		client,
@@ -178,32 +198,40 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 	if let Some(url) = &config.keystore_remote {
 		match remote_keystore(url) {
 			Ok(k) => keystore_container.set_remote_keystore(k),
-			Err(e) => {
+			Err(e) =>
 				return Err(ServiceError::Other(format!(
 					"Error hooking up remote keystore for {}: {}",
 					url, e
-				)))
-			}
+				))),
 		};
 	}
 
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
+	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
+	let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
+		backend.clone(),
+		grandpa_link.shared_authority_set().clone(),
+		vec![],
+	));
 
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: None,
-		block_announce_validator_builder: None,
-	})?;
+	let (network, system_rpc_tx, network_starter) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue,
+			on_demand: None,
+			block_announce_validator_builder: None,
+			warp_sync: Some(warp_sync),
+		})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
+		sc_service::build_offchain_workers(
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
+		);
 	}
 
 	let role = config.role.clone();
@@ -230,8 +258,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		let shared_authority_set = grandpa_link.shared_authority_set().clone();
 		let shared_voter_state = shared_voter_state.clone();
 
-		let finality_proof_provider =
-			GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone()));
+		let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
+			backend,
+			Some(shared_authority_set.clone()),
+		);
 
 		Box::new(move |_, subscription_executor| {
 			let mut io = jsonrpc_core::IoHandler::default();
@@ -250,7 +280,7 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 				subscription_executor,
 				finality_proof_provider.clone(),
 			)));
-			io
+			Ok(io)
 		})
 	};
 
@@ -278,37 +308,40 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 			telemetry.as_ref().map(|x| x.handle()),
 		);
 
-		let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
+		let can_author_with =
+			sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
 
 		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
 		let raw_slot_duration = slot_duration.slot_duration();
 
-		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
-			slot_duration,
-			client,
-			select_chain,
-			block_import,
-			proposer_factory,
-			create_inherent_data_providers: move |_, ()| async move {
-				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-				let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(
+			StartAuraParams {
+				slot_duration,
+				client,
+				select_chain,
+				block_import,
+				proposer_factory,
+				create_inherent_data_providers: move |_, ()| async move {
+					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+					let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
 					*timestamp,
 					raw_slot_duration,
 				);
 
-				Ok((timestamp, slot))
+					Ok((timestamp, slot))
+				},
+				force_authoring,
+				backoff_authoring_blocks,
+				keystore: keystore_container.sync_keystore(),
+				can_author_with,
+				sync_oracle: network.clone(),
+				justification_sync_link: network.clone(),
+				block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
+				max_block_proposal_slot_portion: None,
+				telemetry: telemetry.as_ref().map(|x| x.handle()),
 			},
-			force_authoring,
-			backoff_authoring_blocks,
-			keystore: keystore_container.sync_keystore(),
-			can_author_with,
-			sync_oracle: network.clone(),
-			justification_sync_link: network.clone(),
-			block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
-			max_block_proposal_slot_portion: None,
-			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		})?;
+		)?;
 
 		// the AURA authoring task is considered essential, i.e. if it
 		// fails we take down the service with it.
@@ -317,11 +350,8 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 	// if the node isn't actively participating in consensus then it doesn't
 	// need a keystore, regardless of which protocol we use below.
-	let keystore = if role.is_authority() {
-		Some(keystore_container.sync_keystore())
-	} else {
-		None
-	};
+	let keystore =
+		if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };
 
 	let grandpa_config = sc_finality_grandpa::Config {
 		// FIXME #1578 make this available through chainspec
@@ -353,9 +383,10 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 		// the GRANDPA voter task is considered infallible, i.e.
 		// if it fails we take down the service with it.
-		task_manager
-			.spawn_essential_handle()
-			.spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?);
+		task_manager.spawn_essential_handle().spawn_blocking(
+			"grandpa-voter",
+			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
+		);
 	}
 
 	network_starter.start_network();
@@ -375,10 +406,17 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		})
 		.transpose()?;
 
+	let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new(
+		config.wasm_method,
+		config.default_heap_pages,
+		config.max_runtime_instances,
+	);
+
 	let (client, backend, keystore_container, mut task_manager, on_demand) =
 		sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
 			&config,
 			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
 		)?;
 
 	let mut telemetry = telemetry.map(|(worker, telemetry)| {
@@ -386,10 +424,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		telemetry
 	});
 
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
+	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
 
 	let select_chain = sc_consensus::LongestChain::new(backend.clone());
 
@@ -410,39 +445,54 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 
 	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
 
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
-		block_import: grandpa_block_import.clone(),
-		justification_import: Some(Box::new(grandpa_block_import)),
-		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+	let import_queue =
+		sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
+			block_import: grandpa_block_import.clone(),
+			justification_import: Some(Box::new(grandpa_block_import)),
+			client: client.clone(),
+			create_inherent_data_providers: move |_, ()| async move {
+				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
 
-			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
-			);
-
-			Ok((timestamp, slot))
-		},
-		spawner: &task_manager.spawn_essential_handle(),
-		can_author_with: sp_consensus::NeverCanAuthor,
-		registry: config.prometheus_registry(),
-		check_for_equivocation: Default::default(),
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-	})?;
+				let slot =
+					sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+						*timestamp,
+						slot_duration,
+					);
 
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: Some(on_demand.clone()),
-		block_announce_validator_builder: None,
-	})?;
+				Ok((timestamp, slot))
+			},
+			spawner: &task_manager.spawn_essential_handle(),
+			can_author_with: sp_consensus::NeverCanAuthor,
+			registry: config.prometheus_registry(),
+			check_for_equivocation: Default::default(),
+			telemetry: telemetry.as_ref().map(|x| x.handle()),
+		})?;
+
+	let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
+		backend.clone(),
+		grandpa_link.shared_authority_set().clone(),
+		vec![],
+	));
+
+	let (network, system_rpc_tx, network_starter) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue,
+			on_demand: Some(on_demand.clone()),
+			block_announce_validator_builder: None,
+			warp_sync: Some(warp_sync),
+		})?;
 
 	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
+		sc_service::build_offchain_workers(
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
+		);
 	}
 
 	let enable_grandpa = !config.disable_grandpa;
@@ -470,7 +520,7 @@ pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError>
 		transaction_pool,
 		task_manager: &mut task_manager,
 		on_demand: Some(on_demand),
-		rpc_extensions_builder: Box::new(|_, _| ()),
+		rpc_extensions_builder: Box::new(|_, _| Ok(())),
 		config,
 		client,
 		keystore: keystore_container.sync_keystore(),
diff --git a/polkadot/bridges/bin/millau/runtime/Cargo.toml b/polkadot/bridges/bin/millau/runtime/Cargo.toml
index 367c1c3eef70421e89a9bab3858002a5f53a40ea..c8d7f0a159582094fb4d1996a2f2e8d588c2bc4f 100644
--- a/polkadot/bridges/bin/millau/runtime/Cargo.toml
+++ b/polkadot/bridges/bin/millau/runtime/Cargo.toml
@@ -8,9 +8,10 @@ repository = "https://github.com/paritytech/parity-bridges-common/"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
 hex-literal = "0.3"
-serde = { version = "1.0.124", optional = true, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0", optional = true, features = ["derive"] }
 
 # Bridge dependencies
 
@@ -24,36 +25,38 @@ bridge-runtime-common = { path = "../../runtime-common", default-features = fals
 pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false }
 pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false }
 pallet-bridge-messages = { path = "../../../modules/messages", default-features = false }
+pallet-bridge-token-swap = { path = "../../../modules/token-swap", default-features = false }
 pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false }
 
 # Substrate Dependencies
 
-frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [build-dependencies]
 substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -78,6 +81,7 @@ std = [
 	"pallet-bridge-dispatch/std",
 	"pallet-bridge-grandpa/std",
 	"pallet-bridge-messages/std",
+	"pallet-bridge-token-swap/std",
 	"pallet-grandpa/std",
 	"pallet-randomness-collective-flip/std",
 	"pallet-session/std",
@@ -86,6 +90,7 @@ std = [
 	"pallet-timestamp/std",
 	"pallet-transaction-payment-rpc-runtime-api/std",
 	"pallet-transaction-payment/std",
+	"scale-info/std",
 	"serde",
 	"sp-api/std",
 	"sp-block-builder/std",
@@ -101,6 +106,9 @@ std = [
 	"sp-trie/std",
 	"sp-version/std",
 ]
-# TODO: https://github.com/paritytech/parity-bridges-common/issues/390
-# I've left the feature flag here to test our CI configuration
-runtime-benchmarks = []
+runtime-benchmarks = [
+	"frame-benchmarking",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"pallet-bridge-token-swap/runtime-benchmarks",
+]
diff --git a/polkadot/bridges/bin/millau/runtime/src/lib.rs b/polkadot/bridges/bin/millau/runtime/src/lib.rs
index 9488fe0cb42805111d53a6b9e59167ad92298b93..6beae1cfb55c231d981064d720c622aec7ef2c4f 100644
--- a/polkadot/bridges/bin/millau/runtime/src/lib.rs
+++ b/polkadot/bridges/bin/millau/runtime/src/lib.rs
@@ -34,18 +34,21 @@ pub mod rialto_messages;
 
 use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge};
 
-use bridge_runtime_common::messages::{source::estimate_message_dispatch_and_delivery_fee, MessageBridge};
-use codec::Decode;
-use pallet_grandpa::{fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList};
-use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo};
+use bridge_runtime_common::messages::{
+	source::estimate_message_dispatch_and_delivery_fee, MessageBridge,
+};
+use pallet_grandpa::{
+	fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
+};
+use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo};
 use sp_api::impl_runtime_apis;
 use sp_consensus_aura::sr25519::AuthorityId as AuraId;
 use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
-use sp_runtime::traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys};
 use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys,
+	traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys},
 	transaction_validity::{TransactionSource, TransactionValidity},
-	ApplyExtrinsicResult, MultiSignature, MultiSigner,
+	ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill,
 };
 use sp_std::prelude::*;
 #[cfg(feature = "std")]
@@ -62,8 +65,7 @@ pub use frame_support::{
 
 pub use frame_system::Call as SystemCall;
 pub use pallet_balances::Call as BalancesCall;
-pub use pallet_bridge_grandpa::Call as BridgeGrandpaRialtoCall;
-pub use pallet_bridge_grandpa::Call as BridgeGrandpaWestendCall;
+pub use pallet_bridge_grandpa::Call as BridgeGrandpaCall;
 pub use pallet_bridge_messages::Call as MessagesCall;
 pub use pallet_sudo::Call as SudoCall;
 pub use pallet_timestamp::Call as TimestampCall;
@@ -90,7 +92,7 @@ pub type AccountIndex = u32;
 pub type Balance = bp_millau::Balance;
 
 /// Index of a transaction in the chain.
-pub type Index = u32;
+pub type Index = bp_millau::Index;
 
 /// A hash of some data used by the chain.
 pub type Hash = bp_millau::Hash;
@@ -139,10 +141,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 /// The version information used to identify this runtime when compiled natively.
 #[cfg(feature = "std")]
 pub fn native_version() -> NativeVersion {
-	NativeVersion {
-		runtime_version: VERSION,
-		can_author_with: Default::default(),
-	}
+	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
 }
 
 parameter_types! {
@@ -214,10 +213,11 @@ parameter_types! {
 impl pallet_aura::Config for Runtime {
 	type AuthorityId = AuraId;
 	type MaxAuthorities = MaxAuthorities;
+	type DisabledValidators = ();
 }
 impl pallet_bridge_dispatch::Config for Runtime {
 	type Event = Event;
-	type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce);
+	type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce);
 	type Call = Call;
 	type CallFilter = frame_support::traits::Everything;
 	type EncodedCall = crate::rialto_messages::FromRialtoEncodedCall;
@@ -231,12 +231,16 @@ impl pallet_grandpa::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
 	type KeyOwnerProofSystem = ();
-	type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
-	type KeyOwnerIdentification =
-		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::IdentificationTuple;
+	type KeyOwnerProof =
+		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
+	type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		GrandpaId,
+	)>>::IdentificationTuple;
 	type HandleEquivocation = ();
 	// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
 	type WeightInfo = ();
+	type MaxAuthorities = MaxAuthorities;
 }
 
 parameter_types! {
@@ -244,7 +248,7 @@ parameter_types! {
 }
 
 impl pallet_timestamp::Config for Runtime {
-	/// A timestamp: milliseconds since the Unix epoch.
+	/// A timestamp: milliseconds since the UNIX epoch.
 	type Moment = u64;
 	type OnTimestampSet = Aura;
 	type MinimumPeriod = MinimumPeriod;
@@ -278,13 +282,25 @@ impl pallet_balances::Config for Runtime {
 parameter_types! {
 	pub const TransactionBaseFee: Balance = 0;
 	pub const TransactionByteFee: Balance = 1;
+	pub const OperationalFeeMultiplier: u8 = 5;
+	// values for following parameters are copied from polkadot repo, but it is fine
+	// not to sync them - we're not going to make Rialto a full copy of one of Polkadot-like chains
+	pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25);
+	pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000);
+	pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128);
 }
 
 impl pallet_transaction_payment::Config for Runtime {
 	type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter<Balances, ()>;
 	type TransactionByteFee = TransactionByteFee;
-	type WeightToFee = IdentityFee<Balance>;
-	type FeeMultiplierUpdate = ();
+	type OperationalFeeMultiplier = OperationalFeeMultiplier;
+	type WeightToFee = bp_millau::WeightToFee;
+	type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment<
+		Runtime,
+		TargetBlockFullness,
+		AdjustmentVariable,
+		MinimumMultiplier,
+	>;
 }
 
 impl pallet_sudo::Config for Runtime {
@@ -357,10 +373,11 @@ parameter_types! {
 	pub const GetDeliveryConfirmationTransactionFee: Balance =
 		bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _;
 	pub const RootAccountForPayments: Option<AccountId> = None;
+	pub const RialtoChainId: bp_runtime::ChainId = bp_runtime::RIALTO_CHAIN_ID;
 }
 
 /// Instance of the messages pallet used to relay messages to/from Rialto chain.
-pub type WithRialtoMessagesInstance = pallet_bridge_messages::DefaultInstance;
+pub type WithRialtoMessagesInstance = ();
 
 impl pallet_bridge_messages::Config<WithRialtoMessagesInstance> for Runtime {
 	type Event = Event;
@@ -382,16 +399,45 @@ impl pallet_bridge_messages::Config<WithRialtoMessagesInstance> for Runtime {
 
 	type TargetHeaderChain = crate::rialto_messages::Rialto;
 	type LaneMessageVerifier = crate::rialto_messages::ToRialtoMessageVerifier;
-	type MessageDeliveryAndDispatchPayment = pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
-		Runtime,
-		pallet_balances::Pallet<Runtime>,
-		GetDeliveryConfirmationTransactionFee,
-		RootAccountForPayments,
-	>;
-	type OnDeliveryConfirmed = ();
+	type MessageDeliveryAndDispatchPayment =
+		pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
+			Runtime,
+			(),
+			pallet_balances::Pallet<Runtime>,
+			GetDeliveryConfirmationTransactionFee,
+			RootAccountForPayments,
+		>;
+	type OnMessageAccepted = ();
+	type OnDeliveryConfirmed =
+		pallet_bridge_token_swap::Pallet<Runtime, WithRialtoTokenSwapInstance>;
 
 	type SourceHeaderChain = crate::rialto_messages::Rialto;
 	type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch;
+	type BridgedChainId = RialtoChainId;
+}
+
+parameter_types! {
+	pub const TokenSwapMessagesLane: bp_messages::LaneId = *b"swap";
+}
+
+/// Instance of the with-Rialto token swap pallet.
+pub type WithRialtoTokenSwapInstance = ();
+
+impl pallet_bridge_token_swap::Config<WithRialtoTokenSwapInstance> for Runtime {
+	type Event = Event;
+	type WeightInfo = ();
+
+	type BridgedChainId = RialtoChainId;
+	type OutboundMessageLaneId = TokenSwapMessagesLane;
+	#[cfg(not(feature = "runtime-benchmarks"))]
+	type MessagesBridge = pallet_bridge_messages::Pallet<Runtime, WithRialtoMessagesInstance>;
+	#[cfg(feature = "runtime-benchmarks")]
+	type MessagesBridge = bp_messages::source_chain::NoopMessagesBridge;
+	type ThisCurrency = pallet_balances::Pallet<Runtime>;
+	type FromSwapToThisAccountIdConverter = bp_rialto::AccountIdConverter;
+
+	type BridgedChain = bp_rialto::Rialto;
+	type FromBridgedToThisAccountIdConverter = bp_millau::AccountIdConverter;
 }
 
 construct_runtime!(
@@ -400,20 +446,30 @@ construct_runtime!(
 		NodeBlock = opaque::Block,
 		UncheckedExtrinsic = UncheckedExtrinsic
 	{
-		BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event<T>},
-		BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event<T>},
-		BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage},
-		BridgeWestendGrandpa: pallet_bridge_grandpa::<Instance1>::{Pallet, Call, Config<T>, Storage},
 		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
-		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
-		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
+		Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
+
+		// Must be before session.
 		Aura: pallet_aura::{Pallet, Config<T>},
-		Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
+
+		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
 		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
 		TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
-		Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
+
+		// Consensus support.
 		Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>},
+		Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
 		ShiftSessionManager: pallet_shift_session_manager::{Pallet},
+		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
+
+		// Rialto bridge modules.
+		BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage},
+		BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event<T>},
+		BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event<T>, Config<T>},
+		BridgeRialtoTokenSwap: pallet_bridge_token_swap::{Pallet, Call, Storage, Event<T>},
+
+		// Westend bridge modules.
+		BridgeWestendGrandpa: pallet_bridge_grandpa::<Instance1>::{Pallet, Call, Config<T>, Storage},
 	}
 );
 
@@ -449,7 +505,7 @@ pub type Executive = frame_executive::Executive<
 	Block,
 	frame_system::ChainContext<Runtime>,
 	Runtime,
-	AllPalletsWithSystem,
+	AllPallets,
 >;
 
 impl_runtime_apis! {
@@ -522,7 +578,7 @@ impl_runtime_apis! {
 		}
 
 		fn authorities() -> Vec<AuraId> {
-			Aura::authorities()
+			Aura::authorities().to_vec()
 		}
 	}
 
@@ -551,6 +607,10 @@ impl_runtime_apis! {
 	}
 
 	impl fg_primitives::GrandpaApi<Block> for Runtime {
+		fn current_set_id() -> fg_primitives::SetId {
+			Grandpa::current_set_id()
+		}
+
 		fn grandpa_authorities() -> GrandpaAuthorityList {
 			Grandpa::grandpa_authorities()
 		}
@@ -619,20 +679,11 @@ impl_runtime_apis! {
 			begin: bp_messages::MessageNonce,
 			end: bp_messages::MessageNonce,
 		) -> Vec<bp_messages::MessageDetails<Balance>> {
-			(begin..=end).filter_map(|nonce| {
-				let message_data = BridgeRialtoMessages::outbound_message_data(lane, nonce)?;
-				let decoded_payload = rialto_messages::ToRialtoMessagePayload::decode(
-					&mut &message_data.payload[..]
-				).ok()?;
-				Some(bp_messages::MessageDetails {
-					nonce,
-					dispatch_weight: decoded_payload.weight,
-					size: message_data.payload.len() as _,
-					delivery_and_dispatch_fee: message_data.fee,
-					dispatch_fee_payment: decoded_payload.dispatch_fee_payment,
-				})
-			})
-			.collect()
+			bridge_runtime_common::messages_api::outbound_message_details::<
+				Runtime,
+				WithRialtoMessagesInstance,
+				WithRialtoMessageBridge,
+			>(lane, begin, end)
 		}
 
 		fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce {
@@ -657,6 +708,67 @@ impl_runtime_apis! {
 			BridgeRialtoMessages::inbound_unrewarded_relayers_state(lane)
 		}
 	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	impl frame_benchmarking::Benchmark<Block> for Runtime {
+		fn benchmark_metadata(extra: bool) -> (
+			Vec<frame_benchmarking::BenchmarkList>,
+			Vec<frame_support::traits::StorageInfo>,
+		) {
+			use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList};
+			use frame_support::traits::StorageInfoTrait;
+
+			let mut list = Vec::<BenchmarkList>::new();
+
+			list_benchmark!(list, extra, pallet_bridge_token_swap, BridgeRialtoTokenSwap);
+
+			let storage_info = AllPalletsWithSystem::storage_info();
+
+			return (list, storage_info)
+		}
+
+		fn dispatch_benchmark(
+			config: frame_benchmarking::BenchmarkConfig,
+		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
+			use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey, add_benchmark};
+
+			let whitelist: Vec<TrackedStorageKey> = vec![
+				// Block Number
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
+				// Execution Phase
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
+				// Event Count
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
+				// System Events
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
+				// Caller 0 Account
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec().into(),
+			];
+
+			let mut batches = Vec::<BenchmarkBatch>::new();
+			let params = (&config, &whitelist);
+
+			use pallet_bridge_token_swap::benchmarking::Config as TokenSwapConfig;
+
+			impl TokenSwapConfig<WithRialtoTokenSwapInstance> for Runtime {
+				fn initialize_environment() {
+					let relayers_fund_account = pallet_bridge_messages::relayer_fund_account_id::<
+						bp_millau::AccountId,
+						bp_millau::AccountIdConverter,
+					>();
+					pallet_balances::Pallet::<Runtime>::make_free_balance_be(
+						&relayers_fund_account,
+						Balance::MAX / 100,
+					);
+				}
+			}
+
+			add_benchmark!(params, batches, pallet_bridge_token_swap, BridgeRialtoTokenSwap);
+
+			if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
+			Ok(batches)
+		}
+	}
 }
 
 /// Rialto account ownership digest from Millau.
@@ -698,6 +810,7 @@ mod tests {
 			bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT,
 			bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT,
 			bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT,
+			DbWeight::get(),
 		);
 
 		let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add(
@@ -707,21 +820,31 @@ mod tests {
 			bp_millau::max_extrinsic_size(),
 			bp_millau::max_extrinsic_weight(),
 			max_incoming_message_proof_size,
-			messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight()),
+			messages::target::maximal_incoming_message_dispatch_weight(
+				bp_millau::max_extrinsic_weight(),
+			),
 		);
 
-		let max_incoming_inbound_lane_data_proof_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
-			bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
-			bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
-			bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
-		)
-		.unwrap_or(u32::MAX);
+		let max_incoming_inbound_lane_data_proof_size =
+			bp_messages::InboundLaneData::<()>::encoded_size_hint(
+				bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
+				bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE as _,
+				bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE as _,
+			)
+			.unwrap_or(u32::MAX);
 		pallet_bridge_messages::ensure_able_to_receive_confirmation::<Weights>(
 			bp_millau::max_extrinsic_size(),
 			bp_millau::max_extrinsic_weight(),
 			max_incoming_inbound_lane_data_proof_size,
 			bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 			bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+			DbWeight::get(),
 		);
 	}
+
+	#[test]
+	fn call_size() {
+		const MAX_CALL_SIZE: usize = 230; // value from polkadot-runtime tests
+		assert!(core::mem::size_of::<Call>() <= MAX_CALL_SIZE);
+	}
 }
diff --git a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs
index 12af2c328521b97bbc5e47de6325e2996e006bcd..6d9677c45cf91be170b362c2a3c24807abd7029a 100644
--- a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs
+++ b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs
@@ -31,25 +31,34 @@ use frame_support::{
 	weights::{DispatchClass, Weight},
 	RuntimeDebug,
 };
-use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128};
+use scale_info::TypeInfo;
+use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128};
 use sp_std::{convert::TryFrom, ops::RangeInclusive};
 
 /// Initial value of `RialtoToMillauConversionRate` parameter.
-pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
+pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 =
+	FixedU128::from_inner(FixedU128::DIV);
+/// Initial value of `RialtoFeeMultiplier` parameter.
+pub const INITIAL_RIALTO_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
 
 parameter_types! {
 	/// Rialto to Millau conversion rate. Initially we treat both tokens as equal.
 	pub storage RialtoToMillauConversionRate: FixedU128 = INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE;
+	/// Fee multiplier value at Rialto chain.
+	pub storage RialtoFeeMultiplier: FixedU128 = INITIAL_RIALTO_FEE_MULTIPLIER;
 }
 
 /// Message payload for Millau -> Rialto messages.
-pub type ToRialtoMessagePayload = messages::source::FromThisChainMessagePayload<WithRialtoMessageBridge>;
+pub type ToRialtoMessagePayload =
+	messages::source::FromThisChainMessagePayload<WithRialtoMessageBridge>;
 
 /// Message verifier for Millau -> Rialto messages.
-pub type ToRialtoMessageVerifier = messages::source::FromThisChainMessageVerifier<WithRialtoMessageBridge>;
+pub type ToRialtoMessageVerifier =
+	messages::source::FromThisChainMessageVerifier<WithRialtoMessageBridge>;
 
 /// Message payload for Rialto -> Millau messages.
-pub type FromRialtoMessagePayload = messages::target::FromBridgedChainMessagePayload<WithRialtoMessageBridge>;
+pub type FromRialtoMessagePayload =
+	messages::target::FromBridgedChainMessagePayload<WithRialtoMessageBridge>;
 
 /// Encoded Millau Call as it comes from Rialto.
 pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<crate::Call>;
@@ -58,14 +67,15 @@ pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessag
 type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof<bp_rialto::Hash>;
 
 /// Messages delivery proof for Millau -> Rialto messages.
-type ToRialtoMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof<bp_rialto::Hash>;
+type ToRialtoMessagesDeliveryProof =
+	messages::source::FromBridgedChainMessagesDeliveryProof<bp_rialto::Hash>;
 
 /// Call-dispatch based message dispatch for Rialto -> Millau messages.
 pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch<
 	WithRialtoMessageBridge,
 	crate::Runtime,
 	pallet_balances::Pallet<Runtime>,
-	pallet_bridge_dispatch::DefaultInstance,
+	(),
 >;
 
 /// Millau <-> Rialto message bridge.
@@ -76,14 +86,16 @@ impl MessageBridge for WithRialtoMessageBridge {
 	const RELAYER_FEE_PERCENT: u32 = 10;
 	const THIS_CHAIN_ID: ChainId = MILLAU_CHAIN_ID;
 	const BRIDGED_CHAIN_ID: ChainId = RIALTO_CHAIN_ID;
+	const BRIDGED_MESSAGES_PALLET_NAME: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME;
 
 	type ThisChain = Millau;
 	type BridgedChain = Rialto;
-	type BridgedMessagesInstance = crate::WithRialtoMessagesInstance;
 
 	fn bridged_balance_to_this_balance(bridged_balance: bp_rialto::Balance) -> bp_millau::Balance {
-		bp_millau::Balance::try_from(RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance))
-			.unwrap_or(bp_millau::Balance::MAX)
+		bp_millau::Balance::try_from(
+			RialtoToMillauConversionRate::get().saturating_mul_int(bridged_balance),
+		)
+		.unwrap_or(bp_millau::Balance::MAX)
 	}
 }
 
@@ -104,7 +116,9 @@ impl messages::ThisChainWithMessages for Millau {
 	type Call = crate::Call;
 
 	fn is_outbound_lane_enabled(lane: &LaneId) -> bool {
-		*lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1]
+		*lane == [0, 0, 0, 0] ||
+			*lane == [0, 0, 0, 1] ||
+			*lane == crate::TokenSwapMessagesLane::get()
 	}
 
 	fn maximal_pending_messages_at_outbound_lane() -> MessageNonce {
@@ -128,11 +142,15 @@ impl messages::ThisChainWithMessages for Millau {
 	}
 
 	fn transaction_payment(transaction: MessageTransaction<Weight>) -> bp_millau::Balance {
+		// `transaction` may represent transaction from the future, when multiplier value will
+		// be larger, so let's use slightly increased value
+		let multiplier = FixedU128::saturating_from_rational(110, 100)
+			.saturating_mul(pallet_transaction_payment::Pallet::<Runtime>::next_fee_multiplier());
 		// in our testnets, both per-byte fee and weight-to-fee are 1:1
 		messages::transaction_payment(
 			bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic,
 			1,
-			FixedU128::zero(),
+			multiplier,
 			|weight| weight as _,
 			transaction,
 		)
@@ -159,12 +177,15 @@ impl messages::BridgedChainWithMessages for Rialto {
 
 	fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive<Weight> {
 		// we don't want to relay too large messages + keep reserve for future upgrades
-		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_rialto::max_extrinsic_weight());
+		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(
+			bp_rialto::max_extrinsic_weight(),
+		);
 
-		// we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment` function
+		// we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment`
+		// function
 		//
-		// this bridge may be used to deliver all kind of messages, so we're not making any assumptions about
-		// minimal dispatch weight here
+		// this bridge may be used to deliver all kind of messages, so we're not making any
+		// assumptions about minimal dispatch weight here
 
 		0..=upper_limit
 	}
@@ -195,11 +216,14 @@ impl messages::BridgedChainWithMessages for Rialto {
 	}
 
 	fn transaction_payment(transaction: MessageTransaction<Weight>) -> bp_rialto::Balance {
+		// we don't have a direct access to the value of multiplier at Rialto chain
+		// => it is a messages module parameter
+		let multiplier = RialtoFeeMultiplier::get();
 		// in our testnets, both per-byte fee and weight-to-fee are 1:1
 		messages::transaction_payment(
 			bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic,
 			1,
-			FixedU128::zero(),
+			multiplier,
 			|weight| weight as _,
 			transaction,
 		)
@@ -221,9 +245,11 @@ impl TargetHeaderChain<ToRialtoMessagePayload, bp_rialto::AccountId> for Rialto
 	fn verify_messages_delivery_proof(
 		proof: Self::MessagesDeliveryProof,
 	) -> Result<(LaneId, InboundLaneData<bp_millau::AccountId>), Self::Error> {
-		messages::source::verify_messages_delivery_proof::<WithRialtoMessageBridge, Runtime, crate::RialtoGrandpaInstance>(
-			proof,
-		)
+		messages::source::verify_messages_delivery_proof::<
+			WithRialtoMessageBridge,
+			Runtime,
+			crate::RialtoGrandpaInstance,
+		>(proof)
 	}
 }
 
@@ -240,15 +266,16 @@ impl SourceHeaderChain<bp_rialto::Balance> for Rialto {
 		proof: Self::MessagesProof,
 		messages_count: u32,
 	) -> Result<ProvedMessages<Message<bp_rialto::Balance>>, Self::Error> {
-		messages::target::verify_messages_proof::<WithRialtoMessageBridge, Runtime, crate::RialtoGrandpaInstance>(
-			proof,
-			messages_count,
-		)
+		messages::target::verify_messages_proof::<
+			WithRialtoMessageBridge,
+			Runtime,
+			crate::RialtoGrandpaInstance,
+		>(proof, messages_count)
 	}
 }
 
 /// Millau -> Rialto message lane pallet parameters.
-#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)]
+#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
 pub enum MillauToRialtoMessagesParameter {
 	/// The conversion formula we use is: `MillauTokens = RialtoTokens * conversion_rate`.
 	RialtoToMillauConversionRate(FixedU128),
@@ -257,9 +284,8 @@ pub enum MillauToRialtoMessagesParameter {
 impl MessagesParameter for MillauToRialtoMessagesParameter {
 	fn save(&self) {
 		match *self {
-			MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => {
-				RialtoToMillauConversionRate::set(conversion_rate)
-			}
+			MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) =>
+				RialtoToMillauConversionRate::set(conversion_rate),
 		}
 	}
 }
diff --git a/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml b/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..8adc998e47ee38b1eeebb0e8b280cd01ddf3d056
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml
@@ -0,0 +1,89 @@
+[package]
+name = "rialto-parachain-collator"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/parity-bridges-common/"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[build-dependencies]
+substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+[[bin]]
+name = 'rialto-parachain-collator'
+
+[features]
+default = []
+runtime-benchmarks = ['rialto-parachain-runtime/runtime-benchmarks']
+
+[dependencies]
+derive_more = '0.99.2'
+log = '0.4.14'
+codec = { package = 'parity-scale-codec', version = '2.0.0' }
+structopt = '0.3.8'
+serde = { version = '1.0', features = ['derive'] }
+hex-literal = '0.3.1'
+
+# RPC related Dependencies
+jsonrpc-core = '18.0'
+
+# Local Dependencies
+rialto-parachain-runtime = { path = '../runtime' }
+
+# Substrate Dependencies
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" }
+frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
+substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+## Substrate Client Dependencies
+sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", features = ['wasmtime'] }
+sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+## Substrate Primitive Dependencies
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+# Cumulus dependencies
+cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-client-collator = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "master" }
+
+# Polkadot dependencies
+polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-test-service = { git = "https://github.com/paritytech/polkadot", branch = "master" }
\ No newline at end of file
diff --git a/polkadot/bridges/bin/rialto-parachain/node/build.rs b/polkadot/bridges/bin/rialto-parachain/node/build.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8ba8a31e9a79fdf45e93c7efb49470e207f90049
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/build.rs
@@ -0,0 +1,22 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed};
+
+fn main() {
+	generate_cargo_keys();
+	rerun_if_git_head_changed();
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f93887a21e47918d42c1cc34e8e147be426857c5
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs
@@ -0,0 +1,165 @@
+// Copyright 2020-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use cumulus_primitives_core::ParaId;
+use rialto_parachain_runtime::{AccountId, AuraId, Signature};
+use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup};
+use sc_service::ChainType;
+use serde::{Deserialize, Serialize};
+use sp_core::{sr25519, Pair, Public};
+use sp_runtime::traits::{IdentifyAccount, Verify};
+
+/// Specialized `ChainSpec` for the normal parachain runtime.
+pub type ChainSpec =
+	sc_service::GenericChainSpec<rialto_parachain_runtime::GenesisConfig, Extensions>;
+
+/// Helper function to generate a crypto pair from seed
+pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
+	TPublic::Pair::from_string(&format!("//{}", seed), None)
+		.expect("static values are valid; qed")
+		.public()
+}
+
+/// The extensions for the [`ChainSpec`].
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)]
+#[serde(deny_unknown_fields)]
+pub struct Extensions {
+	/// The relay chain of the Parachain.
+	pub relay_chain: String,
+	/// The id of the Parachain.
+	pub para_id: u32,
+}
+
+impl Extensions {
+	/// Try to get the extension from the given `ChainSpec`.
+	pub fn try_get(chain_spec: &dyn sc_service::ChainSpec) -> Option<&Self> {
+		sc_chain_spec::get_extension(chain_spec.extensions())
+	}
+}
+
+type AccountPublic = <Signature as Verify>::Signer;
+
+/// Helper function to generate an account ID from seed
+pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
+where
+	AccountPublic: From<<TPublic::Pair as Pair>::Public>,
+{
+	AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
+}
+
+pub fn development_config(id: ParaId) -> ChainSpec {
+	// Give your base currency a unit name and decimal places
+	let mut properties = sc_chain_spec::Properties::new();
+	properties.insert("tokenSymbol".into(), "UNIT".into());
+	properties.insert("tokenDecimals".into(), 12.into());
+
+	ChainSpec::from_genesis(
+		// Name
+		"Development",
+		// ID
+		"dev",
+		ChainType::Local,
+		move || {
+			testnet_genesis(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				vec![get_from_seed::<AuraId>("Alice"), get_from_seed::<AuraId>("Bob")],
+				vec![
+					get_account_id_from_seed::<sr25519::Public>("Alice"),
+					get_account_id_from_seed::<sr25519::Public>("Bob"),
+					get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+				],
+				id,
+			)
+		},
+		vec![],
+		None,
+		None,
+		None,
+		Extensions {
+			relay_chain: "rococo-local".into(), // You MUST set this to the correct network!
+			para_id: id.into(),
+		},
+	)
+}
+
+pub fn local_testnet_config(id: ParaId) -> ChainSpec {
+	// Give your base currency a unit name and decimal places
+	let mut properties = sc_chain_spec::Properties::new();
+	properties.insert("tokenSymbol".into(), "UNIT".into());
+	properties.insert("tokenDecimals".into(), 12.into());
+
+	ChainSpec::from_genesis(
+		// Name
+		"Local Testnet",
+		// ID
+		"local_testnet",
+		ChainType::Local,
+		move || {
+			testnet_genesis(
+				get_account_id_from_seed::<sr25519::Public>("Alice"),
+				vec![get_from_seed::<AuraId>("Alice"), get_from_seed::<AuraId>("Bob")],
+				vec![
+					get_account_id_from_seed::<sr25519::Public>("Alice"),
+					get_account_id_from_seed::<sr25519::Public>("Bob"),
+					get_account_id_from_seed::<sr25519::Public>("Charlie"),
+					get_account_id_from_seed::<sr25519::Public>("Dave"),
+					get_account_id_from_seed::<sr25519::Public>("Eve"),
+					get_account_id_from_seed::<sr25519::Public>("Ferdie"),
+					get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
+					get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
+				],
+				id,
+			)
+		},
+		Vec::new(),
+		None,
+		None,
+		None,
+		Extensions {
+			relay_chain: "rococo-local".into(), // You MUST set this to the correct network!
+			para_id: id.into(),
+		},
+	)
+}
+
+fn testnet_genesis(
+	root_key: AccountId,
+	initial_authorities: Vec<AuraId>,
+	endowed_accounts: Vec<AccountId>,
+	id: ParaId,
+) -> rialto_parachain_runtime::GenesisConfig {
+	rialto_parachain_runtime::GenesisConfig {
+		system: rialto_parachain_runtime::SystemConfig {
+			code: rialto_parachain_runtime::WASM_BINARY
+				.expect("WASM binary was not build, please build it!")
+				.to_vec(),
+			changes_trie_config: Default::default(),
+		},
+		balances: rialto_parachain_runtime::BalancesConfig {
+			balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(),
+		},
+		sudo: rialto_parachain_runtime::SudoConfig { key: root_key },
+		parachain_info: rialto_parachain_runtime::ParachainInfoConfig { parachain_id: id },
+		aura: rialto_parachain_runtime::AuraConfig { authorities: initial_authorities },
+		aura_ext: Default::default(),
+		// parachain_system: Default::default(),
+	}
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs b/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bc2238e2fd44e687661964e43c8d9978cfbd8ec8
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs
@@ -0,0 +1,137 @@
+// Copyright 2020-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use crate::chain_spec;
+use std::path::PathBuf;
+use structopt::StructOpt;
+
+/// Sub-commands supported by the collator.
+#[derive(Debug, StructOpt)]
+pub enum Subcommand {
+	/// Export the genesis state of the parachain.
+	#[structopt(name = "export-genesis-state")]
+	ExportGenesisState(ExportGenesisStateCommand),
+
+	/// Export the genesis wasm of the parachain.
+	#[structopt(name = "export-genesis-wasm")]
+	ExportGenesisWasm(ExportGenesisWasmCommand),
+
+	/// Build a chain specification.
+	BuildSpec(sc_cli::BuildSpecCmd),
+
+	/// Validate blocks.
+	CheckBlock(sc_cli::CheckBlockCmd),
+
+	/// Export blocks.
+	ExportBlocks(sc_cli::ExportBlocksCmd),
+
+	/// Export the state of a given block into a chain spec.
+	ExportState(sc_cli::ExportStateCmd),
+
+	/// Import blocks.
+	ImportBlocks(sc_cli::ImportBlocksCmd),
+
+	/// Remove the whole chain.
+	PurgeChain(cumulus_client_cli::PurgeChainCmd),
+
+	/// Revert the chain to a previous state.
+	Revert(sc_cli::RevertCmd),
+
+	/// The custom benchmark subcommand for benchmarking runtime pallets.
+	#[structopt(name = "benchmark", about = "Benchmark runtime pallets.")]
+	Benchmark(frame_benchmarking_cli::BenchmarkCmd),
+}
+
+/// Command for exporting the genesis state of the parachain
+#[derive(Debug, StructOpt)]
+pub struct ExportGenesisStateCommand {
+	/// Output file name or stdout if unspecified.
+	#[structopt(parse(from_os_str))]
+	pub output: Option<PathBuf>,
+
+	/// Id of the parachain this state is for.
+	///
+	/// Default: 100
+	#[structopt(long, conflicts_with = "chain")]
+	pub parachain_id: Option<u32>,
+
+	/// Write output in binary. Default is to write in hex.
+	#[structopt(short, long)]
+	pub raw: bool,
+
+	/// The name of the chain for which the genesis state should be exported.
+	#[structopt(long, conflicts_with = "parachain-id")]
+	pub chain: Option<String>,
+}
+
+/// Command for exporting the genesis wasm file.
+#[derive(Debug, StructOpt)]
+pub struct ExportGenesisWasmCommand {
+	/// Output file name or stdout if unspecified.
+	#[structopt(parse(from_os_str))]
+	pub output: Option<PathBuf>,
+
+	/// Write output in binary. Default is to write in hex.
+	#[structopt(short, long)]
+	pub raw: bool,
+
+	/// The name of the chain for which the genesis wasm file should be exported.
+	#[structopt(long)]
+	pub chain: Option<String>,
+}
+
+#[derive(Debug, StructOpt)]
+#[structopt(settings = &[
+	structopt::clap::AppSettings::GlobalVersion,
+	structopt::clap::AppSettings::ArgsNegateSubcommands,
+	structopt::clap::AppSettings::SubcommandsNegateReqs,
+])]
+pub struct Cli {
+	#[structopt(subcommand)]
+	pub subcommand: Option<Subcommand>,
+
+	#[structopt(flatten)]
+	pub run: cumulus_client_cli::RunCmd,
+
+	/// Relaychain arguments
+	#[structopt(raw = true)]
+	pub relaychain_args: Vec<String>,
+}
+
+#[derive(Debug)]
+pub struct RelayChainCli {
+	/// The actual relay chain cli object.
+	pub base: polkadot_cli::RunCmd,
+
+	/// Optional chain id that should be passed to the relay chain.
+	pub chain_id: Option<String>,
+
+	/// The base path that should be used by the relay chain.
+	pub base_path: Option<PathBuf>,
+}
+
+impl RelayChainCli {
+	/// Parse the relay chain CLI parameters using the parachain `Configuration`.
+	pub fn new<'a>(
+		para_config: &sc_service::Configuration,
+		relay_chain_args: impl Iterator<Item = &'a String>,
+	) -> Self {
+		let extension = chain_spec::Extensions::try_get(&*para_config.chain_spec);
+		let chain_id = extension.map(|e| e.relay_chain.clone());
+		let base_path = para_config.base_path.as_ref().map(|x| x.path().join("rialto-bridge-node"));
+		Self { base_path, chain_id, base: polkadot_cli::RunCmd::from_iter(relay_chain_args) }
+	}
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/command.rs b/polkadot/bridges/bin/rialto-parachain/node/src/command.rs
new file mode 100644
index 0000000000000000000000000000000000000000..eb9aba2c104ba12a39d0fea6e96679cfe0b41c9d
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/src/command.rs
@@ -0,0 +1,424 @@
+// Copyright 2020-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use crate::{
+	chain_spec,
+	cli::{Cli, RelayChainCli, Subcommand},
+	service::{new_partial, ParachainRuntimeExecutor},
+};
+use codec::Encode;
+use cumulus_client_service::genesis::generate_genesis_block;
+use cumulus_primitives_core::ParaId;
+use log::info;
+use polkadot_parachain::primitives::AccountIdConversion;
+use rialto_parachain_runtime::{Block, RuntimeApi};
+use sc_cli::{
+	ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams,
+	NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli,
+};
+use sc_service::config::{BasePath, PrometheusConfig};
+use sp_core::hexdisplay::HexDisplay;
+use sp_runtime::traits::Block as BlockT;
+use std::{io::Write, net::SocketAddr};
+
+fn load_spec(
+	id: &str,
+	para_id: ParaId,
+) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
+	Ok(match id {
+		"dev" => Box::new(chain_spec::development_config(para_id)),
+		"" | "local" => Box::new(chain_spec::local_testnet_config(para_id)),
+		path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?),
+	})
+}
+
+impl SubstrateCli for Cli {
+	fn impl_name() -> String {
+		"Parachain Collator Template".into()
+	}
+
+	fn impl_version() -> String {
+		env!("SUBSTRATE_CLI_IMPL_VERSION").into()
+	}
+
+	fn description() -> String {
+		format!(
+			"Parachain Collator Template\n\nThe command-line arguments provided first will be \
+		passed to the parachain node, while the arguments provided after -- will be passed \
+		to the relaychain node.\n\n\
+		{} [parachain-args] -- [relaychain-args]",
+			Self::executable_name()
+		)
+	}
+
+	fn author() -> String {
+		env!("CARGO_PKG_AUTHORS").into()
+	}
+
+	fn support_url() -> String {
+		"https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into()
+	}
+
+	fn copyright_start_year() -> i32 {
+		2017
+	}
+
+	fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
+		load_spec(id, self.run.parachain_id.unwrap_or(2000).into())
+	}
+
+	fn native_runtime_version(_: &Box<dyn ChainSpec>) -> &'static RuntimeVersion {
+		&rialto_parachain_runtime::VERSION
+	}
+}
+
+impl SubstrateCli for RelayChainCli {
+	fn impl_name() -> String {
+		"Parachain Collator Template".into()
+	}
+
+	fn impl_version() -> String {
+		env!("SUBSTRATE_CLI_IMPL_VERSION").into()
+	}
+
+	fn description() -> String {
+		"Parachain Collator Template\n\nThe command-line arguments provided first will be \
+		passed to the parachain node, while the arguments provided after -- will be passed \
+		to the relaychain node.\n\n\
+		parachain-collator [parachain-args] -- [relaychain-args]"
+			.into()
+	}
+
+	fn author() -> String {
+		env!("CARGO_PKG_AUTHORS").into()
+	}
+
+	fn support_url() -> String {
+		"https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into()
+	}
+
+	fn copyright_start_year() -> i32 {
+		2017
+	}
+
+	fn load_spec(&self, id: &str) -> std::result::Result<Box<dyn sc_service::ChainSpec>, String> {
+		polkadot_cli::Cli::from_iter([RelayChainCli::executable_name()].iter()).load_spec(id)
+	}
+
+	fn native_runtime_version(chain_spec: &Box<dyn ChainSpec>) -> &'static RuntimeVersion {
+		polkadot_cli::Cli::native_runtime_version(chain_spec)
+	}
+}
+
+fn extract_genesis_wasm(chain_spec: &dyn sc_service::ChainSpec) -> Result<Vec<u8>> {
+	let mut storage = chain_spec.build_storage()?;
+
+	storage
+		.top
+		.remove(sp_core::storage::well_known_keys::CODE)
+		.ok_or_else(|| "Could not find wasm file in genesis state!".into())
+}
+
+macro_rules! construct_async_run {
+	(|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{
+		let runner = $cli.create_runner($cmd)?;
+		runner.async_run(|$config| {
+			let $components = new_partial::<
+				RuntimeApi,
+				ParachainRuntimeExecutor,
+				_
+			>(
+				&$config,
+				crate::service::parachain_build_import_queue,
+			)?;
+			let task_manager = $components.task_manager;
+			{ $( $code )* }.map(|v| (v, task_manager))
+		})
+	}}
+}
+
+/// Parse command line arguments into service configuration.
+pub fn run() -> Result<()> {
+	let cli = Cli::from_args();
+	sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::Custom(
+		rialto_parachain_runtime::SS58Prefix::get() as u16,
+	));
+
+	match &cli.subcommand {
+		Some(Subcommand::BuildSpec(cmd)) => {
+			let runner = cli.create_runner(cmd)?;
+			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
+		},
+		Some(Subcommand::CheckBlock(cmd)) => {
+			construct_async_run!(|components, cli, cmd, config| {
+				Ok(cmd.run(components.client, components.import_queue))
+			})
+		},
+		Some(Subcommand::ExportBlocks(cmd)) => {
+			construct_async_run!(|components, cli, cmd, config| Ok(
+				cmd.run(components.client, config.database)
+			))
+		},
+		Some(Subcommand::ExportState(cmd)) => {
+			construct_async_run!(|components, cli, cmd, config| Ok(
+				cmd.run(components.client, config.chain_spec)
+			))
+		},
+		Some(Subcommand::ImportBlocks(cmd)) => {
+			construct_async_run!(|components, cli, cmd, config| {
+				Ok(cmd.run(components.client, components.import_queue))
+			})
+		},
+		Some(Subcommand::PurgeChain(cmd)) => {
+			let runner = cli.create_runner(cmd)?;
+
+			runner.sync_run(|config| {
+				let polkadot_cli = RelayChainCli::new(
+					&config,
+					[RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()),
+				);
+
+				let polkadot_config = SubstrateCli::create_configuration(
+					&polkadot_cli,
+					&polkadot_cli,
+					config.tokio_handle.clone(),
+				)
+				.map_err(|err| format!("Relay chain argument error: {}", err))?;
+
+				cmd.run(config, polkadot_config)
+			})
+		},
+		Some(Subcommand::Revert(cmd)) => {
+			construct_async_run!(|components, cli, cmd, config| Ok(
+				cmd.run(components.client, components.backend)
+			))
+		},
+		Some(Subcommand::ExportGenesisState(params)) => {
+			let mut builder = sc_cli::LoggerBuilder::new("");
+			builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
+			let _ = builder.init();
+
+			let block: Block = generate_genesis_block(&load_spec(
+				&params.chain.clone().unwrap_or_default(),
+				params.parachain_id.expect("Missing ParaId").into(),
+			)?)?;
+			let raw_header = block.header().encode();
+			let output_buf = if params.raw {
+				raw_header
+			} else {
+				format!("0x{:?}", HexDisplay::from(&block.header().encode())).into_bytes()
+			};
+
+			if let Some(output) = &params.output {
+				std::fs::write(output, output_buf)?;
+			} else {
+				std::io::stdout().write_all(&output_buf)?;
+			}
+
+			Ok(())
+		},
+		Some(Subcommand::ExportGenesisWasm(params)) => {
+			let mut builder = sc_cli::LoggerBuilder::new("");
+			builder.with_profiling(sc_tracing::TracingReceiver::Log, "");
+			let _ = builder.init();
+
+			let raw_wasm_blob =
+				extract_genesis_wasm(&*cli.load_spec(&params.chain.clone().unwrap_or_default())?)?;
+			let output_buf = if params.raw {
+				raw_wasm_blob
+			} else {
+				format!("0x{:?}", HexDisplay::from(&raw_wasm_blob)).into_bytes()
+			};
+
+			if let Some(output) = &params.output {
+				std::fs::write(output, output_buf)?;
+			} else {
+				std::io::stdout().write_all(&output_buf)?;
+			}
+
+			Ok(())
+		},
+		Some(Subcommand::Benchmark(cmd)) =>
+			if cfg!(feature = "runtime-benchmarks") {
+				let runner = cli.create_runner(cmd)?;
+
+				runner.sync_run(|config| cmd.run::<Block, ParachainRuntimeExecutor>(config))
+			} else {
+				Err("Benchmarking wasn't enabled when building the node. \
+				You can enable it with `--features runtime-benchmarks`."
+					.into())
+			},
+		None => {
+			let runner = cli.create_runner(&cli.run.normalize())?;
+
+			runner.run_node_until_exit(|config| async move {
+				let para_id =
+					chain_spec::Extensions::try_get(&*config.chain_spec).map(|e| e.para_id);
+
+				let polkadot_cli = RelayChainCli::new(
+					&config,
+					[RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()),
+				);
+
+				let id = ParaId::from(cli.run.parachain_id.or(para_id).expect("Missing ParaId"));
+
+				let parachain_account =
+					AccountIdConversion::<polkadot_primitives::v0::AccountId>::into_account(&id);
+
+				let block: Block =
+					generate_genesis_block(&config.chain_spec).map_err(|e| format!("{:?}", e))?;
+				let genesis_state = format!("0x{:?}", HexDisplay::from(&block.header().encode()));
+
+				let polkadot_config = SubstrateCli::create_configuration(
+					&polkadot_cli,
+					&polkadot_cli,
+					config.tokio_handle.clone(),
+				)
+				.map_err(|err| format!("Relay chain argument error: {}", err))?;
+
+				info!("Parachain id: {:?}", id);
+				info!("Parachain Account: {}", parachain_account);
+				info!("Parachain genesis state: {}", genesis_state);
+				info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" });
+
+				crate::service::start_node(config, polkadot_config, id)
+					.await
+					.map(|r| r.0)
+					.map_err(Into::into)
+			})
+		},
+	}
+}
+
+impl DefaultConfigurationValues for RelayChainCli {
+	fn p2p_listen_port() -> u16 {
+		30334
+	}
+
+	fn rpc_ws_listen_port() -> u16 {
+		9945
+	}
+
+	fn rpc_http_listen_port() -> u16 {
+		9934
+	}
+
+	fn prometheus_listen_port() -> u16 {
+		9616
+	}
+}
+
+impl CliConfiguration<Self> for RelayChainCli {
+	fn shared_params(&self) -> &SharedParams {
+		self.base.base.shared_params()
+	}
+
+	fn import_params(&self) -> Option<&ImportParams> {
+		self.base.base.import_params()
+	}
+
+	fn network_params(&self) -> Option<&NetworkParams> {
+		self.base.base.network_params()
+	}
+
+	fn keystore_params(&self) -> Option<&KeystoreParams> {
+		self.base.base.keystore_params()
+	}
+
+	fn base_path(&self) -> Result<Option<BasePath>> {
+		Ok(self
+			.shared_params()
+			.base_path()
+			.or_else(|| self.base_path.clone().map(Into::into)))
+	}
+
+	fn rpc_http(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
+		self.base.base.rpc_http(default_listen_port)
+	}
+
+	fn rpc_ipc(&self) -> Result<Option<String>> {
+		self.base.base.rpc_ipc()
+	}
+
+	fn rpc_ws(&self, default_listen_port: u16) -> Result<Option<SocketAddr>> {
+		self.base.base.rpc_ws(default_listen_port)
+	}
+
+	fn prometheus_config(&self, default_listen_port: u16) -> Result<Option<PrometheusConfig>> {
+		self.base.base.prometheus_config(default_listen_port)
+	}
+
+	fn init<C: SubstrateCli>(&self) -> Result<()> {
+		unreachable!("PolkadotCli is never initialized; qed");
+	}
+
+	fn chain_id(&self, is_dev: bool) -> Result<String> {
+		let chain_id = self.base.base.chain_id(is_dev)?;
+
+		Ok(if chain_id.is_empty() { self.chain_id.clone().unwrap_or_default() } else { chain_id })
+	}
+
+	fn role(&self, is_dev: bool) -> Result<sc_service::Role> {
+		self.base.base.role(is_dev)
+	}
+
+	fn transaction_pool(&self) -> Result<sc_service::config::TransactionPoolOptions> {
+		self.base.base.transaction_pool()
+	}
+
+	fn state_cache_child_ratio(&self) -> Result<Option<usize>> {
+		self.base.base.state_cache_child_ratio()
+	}
+
+	fn rpc_methods(&self) -> Result<sc_service::config::RpcMethods> {
+		self.base.base.rpc_methods()
+	}
+
+	fn rpc_ws_max_connections(&self) -> Result<Option<usize>> {
+		self.base.base.rpc_ws_max_connections()
+	}
+
+	fn rpc_cors(&self, is_dev: bool) -> Result<Option<Vec<String>>> {
+		self.base.base.rpc_cors(is_dev)
+	}
+
+	fn default_heap_pages(&self) -> Result<Option<u64>> {
+		self.base.base.default_heap_pages()
+	}
+
+	fn force_authoring(&self) -> Result<bool> {
+		self.base.base.force_authoring()
+	}
+
+	fn disable_grandpa(&self) -> Result<bool> {
+		self.base.base.disable_grandpa()
+	}
+
+	fn max_runtime_instances(&self) -> Result<Option<usize>> {
+		self.base.base.max_runtime_instances()
+	}
+
+	fn announce_block(&self) -> Result<bool> {
+		self.base.base.announce_block()
+	}
+
+	fn telemetry_endpoints(
+		&self,
+		chain_spec: &Box<dyn ChainSpec>,
+	) -> Result<Option<sc_telemetry::TelemetryEndpoints>> {
+		self.base.base.telemetry_endpoints(chain_spec)
+	}
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3ec291596b71946ca6a24347dac29598bdb2ca0b
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs
@@ -0,0 +1,18 @@
+// Copyright 2020-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+pub mod chain_spec;
+pub mod service;
diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/main.rs b/polkadot/bridges/bin/rialto-parachain/node/src/main.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2b4e0b438d1a9a30524e6755bc14634cef65056b
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/src/main.rs
@@ -0,0 +1,29 @@
+// Copyright 2020-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Substrate Parachain Node Template CLI
+
+#![warn(missing_docs)]
+
+mod chain_spec;
+#[macro_use]
+mod service;
+mod cli;
+mod command;
+
+fn main() -> sc_cli::Result<()> {
+	command::run()
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/service.rs b/polkadot/bridges/bin/rialto-parachain/node/src/service.rs
new file mode 100644
index 0000000000000000000000000000000000000000..65a8e7bb65c579e55b42b8d0fdaef3058f0a4b96
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/node/src/service.rs
@@ -0,0 +1,496 @@
+// Copyright 2020-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+// std
+use std::sync::Arc;
+
+// Local Runtime Types
+use rialto_parachain_runtime::RuntimeApi;
+
+// Cumulus Imports
+use cumulus_client_consensus_aura::{
+	build_aura_consensus, BuildAuraConsensusParams, SlotProportion,
+};
+use cumulus_client_consensus_common::ParachainConsensus;
+use cumulus_client_network::build_block_announce_validator;
+use cumulus_client_service::{
+	prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams,
+};
+use cumulus_primitives_core::ParaId;
+
+// Substrate Imports
+use sc_client_api::ExecutorProvider;
+use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch};
+use sc_network::NetworkService;
+use sc_service::{Configuration, PartialComponents, Role, TFullBackend, TFullClient, TaskManager};
+use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
+use sp_api::ConstructRuntimeApi;
+use sp_consensus::SlotData;
+use sp_keystore::SyncCryptoStorePtr;
+use sp_runtime::traits::BlakeTwo256;
+use substrate_prometheus_endpoint::Registry;
+
+// Runtime type overrides
+type BlockNumber = u32;
+type Header = sp_runtime::generic::Header<BlockNumber, sp_runtime::traits::BlakeTwo256>;
+pub type Block = sp_runtime::generic::Block<Header, sp_runtime::OpaqueExtrinsic>;
+type Hash = sp_core::H256;
+
+pub type ParachainRuntimeExecutor = ExecutorDispatch;
+
+// Our native executor instance.
+pub struct ExecutorDispatch;
+
+impl NativeExecutionDispatch for ExecutorDispatch {
+	type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
+
+	fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
+		rialto_parachain_runtime::api::dispatch(method, data)
+	}
+
+	fn native_version() -> sc_executor::NativeVersion {
+		rialto_parachain_runtime::native_version()
+	}
+}
+
+/// Starts a `ServiceBuilder` for a full service.
+///
+/// Use this macro if you don't actually need the full service, but just the builder in order to
+/// be able to perform chain operations.
+#[allow(clippy::type_complexity)]
+pub fn new_partial<RuntimeApi, Executor, BIQ>(
+	config: &Configuration,
+	build_import_queue: BIQ,
+) -> Result<
+	PartialComponents<
+		TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		TFullBackend<Block>,
+		(),
+		sc_consensus::DefaultImportQueue<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
+		sc_transaction_pool::FullPool<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
+		(Option<Telemetry>, Option<TelemetryWorkerHandle>),
+	>,
+	sc_service::Error,
+>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>
+		+ Send
+		+ Sync
+		+ 'static,
+	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+		+ sp_api::Metadata<Block>
+		+ sp_session::SessionKeys<Block>
+		+ sp_api::ApiExt<
+			Block,
+			StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
+		> + sp_offchain::OffchainWorkerApi<Block>
+		+ sp_block_builder::BlockBuilder<Block>,
+	sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
+	Executor: NativeExecutionDispatch + 'static,
+	BIQ: FnOnce(
+		Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		&Configuration,
+		Option<TelemetryHandle>,
+		&TaskManager,
+	) -> Result<
+		sc_consensus::DefaultImportQueue<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
+		sc_service::Error,
+	>,
+{
+	let telemetry = config
+		.telemetry_endpoints
+		.clone()
+		.filter(|x| !x.is_empty())
+		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
+			let worker = TelemetryWorker::new(16)?;
+			let telemetry = worker.handle().new_telemetry(endpoints);
+			Ok((worker, telemetry))
+		})
+		.transpose()?;
+
+	let executor = sc_executor::NativeElseWasmExecutor::<Executor>::new(
+		config.wasm_method,
+		config.default_heap_pages,
+		config.max_runtime_instances,
+	);
+
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, _>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
+	let client = Arc::new(client);
+
+	let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+	let telemetry = telemetry.map(|(worker, telemetry)| {
+		task_manager.spawn_handle().spawn("telemetry", worker.run());
+		telemetry
+	});
+
+	let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+		config.transaction_pool.clone(),
+		config.role.is_authority().into(),
+		config.prometheus_registry(),
+		task_manager.spawn_essential_handle(),
+		client.clone(),
+	);
+
+	let import_queue = build_import_queue(
+		client.clone(),
+		config,
+		telemetry.as_ref().map(|telemetry| telemetry.handle()),
+		&task_manager,
+	)?;
+
+	let params = PartialComponents {
+		backend,
+		client,
+		import_queue,
+		keystore_container,
+		task_manager,
+		transaction_pool,
+		select_chain: (),
+		other: (telemetry, telemetry_worker_handle),
+	};
+
+	Ok(params)
+}
+
+/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
+///
+/// This is the actual implementation that is abstract over the executor and the runtime api.
+#[sc_tracing::logging::prefix_logs_with("Parachain")]
+async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(
+	parachain_config: Configuration,
+	polkadot_config: Configuration,
+	id: ParaId,
+	rpc_ext_builder: RB,
+	build_import_queue: BIQ,
+	build_consensus: BIC,
+) -> sc_service::error::Result<(
+	TaskManager,
+	Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+)>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>
+		+ Send
+		+ Sync
+		+ 'static,
+	RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+		+ sp_api::Metadata<Block>
+		+ sp_session::SessionKeys<Block>
+		+ sp_api::ApiExt<
+			Block,
+			StateBackend = sc_client_api::StateBackendFor<TFullBackend<Block>, Block>,
+		> + sp_offchain::OffchainWorkerApi<Block>
+		+ sp_block_builder::BlockBuilder<Block>
+		+ cumulus_primitives_core::CollectCollationInfo<Block>,
+	sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
+	Executor: NativeExecutionDispatch + 'static,
+	RB: Fn(
+			Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		) -> jsonrpc_core::IoHandler<sc_rpc::Metadata>
+		+ Send
+		+ 'static,
+	BIQ: FnOnce(
+		Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		&Configuration,
+		Option<TelemetryHandle>,
+		&TaskManager,
+	) -> Result<
+		sc_consensus::DefaultImportQueue<
+			Block,
+			TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+		>,
+		sc_service::Error,
+	>,
+	BIC: FnOnce(
+		Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
+		Option<&Registry>,
+		Option<TelemetryHandle>,
+		&TaskManager,
+		&polkadot_service::NewFull<polkadot_service::Client>,
+		Arc<
+			sc_transaction_pool::FullPool<
+				Block,
+				TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
+			>,
+		>,
+		Arc<NetworkService<Block, Hash>>,
+		SyncCryptoStorePtr,
+		bool,
+	) -> Result<Box<dyn ParachainConsensus<Block>>, sc_service::Error>,
+{
+	if matches!(parachain_config.role, Role::Light) {
+		return Err("Light client not supported!".into())
+	}
+
+	let parachain_config = prepare_node_config(parachain_config);
+
+	let params = new_partial::<RuntimeApi, Executor, BIQ>(&parachain_config, build_import_queue)?;
+	let (mut telemetry, telemetry_worker_handle) = params.other;
+
+	let relay_chain_full_node =
+		cumulus_client_service::build_polkadot_full_node(polkadot_config, telemetry_worker_handle)
+			.map_err(|e| match e {
+				polkadot_service::Error::Sub(x) => x,
+				s => format!("{}", s).into(),
+			})?;
+
+	let client = params.client.clone();
+	let backend = params.backend.clone();
+	let block_announce_validator = build_block_announce_validator(
+		relay_chain_full_node.client.clone(),
+		id,
+		Box::new(relay_chain_full_node.network.clone()),
+		relay_chain_full_node.backend.clone(),
+	);
+
+	let force_authoring = parachain_config.force_authoring;
+	let validator = parachain_config.role.is_authority();
+	let prometheus_registry = parachain_config.prometheus_registry().cloned();
+	let transaction_pool = params.transaction_pool.clone();
+	let mut task_manager = params.task_manager;
+	let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
+	let (network, system_rpc_tx, start_network) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &parachain_config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue: import_queue.clone(),
+			on_demand: None,
+			block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
+			warp_sync: None,
+		})?;
+
+	let rpc_client = client.clone();
+	let rpc_extensions_builder = Box::new(move |_, _| Ok(rpc_ext_builder(rpc_client.clone())));
+
+	sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+		on_demand: None,
+		remote_blockchain: None,
+		rpc_extensions_builder,
+		client: client.clone(),
+		transaction_pool: transaction_pool.clone(),
+		task_manager: &mut task_manager,
+		config: parachain_config,
+		keystore: params.keystore_container.sync_keystore(),
+		backend: backend.clone(),
+		network: network.clone(),
+		system_rpc_tx,
+		telemetry: telemetry.as_mut(),
+	})?;
+
+	let announce_block = {
+		let network = network.clone();
+		Arc::new(move |hash, data| network.announce_block(hash, data))
+	};
+
+	if validator {
+		let parachain_consensus = build_consensus(
+			client.clone(),
+			prometheus_registry.as_ref(),
+			telemetry.as_ref().map(|t| t.handle()),
+			&task_manager,
+			&relay_chain_full_node,
+			transaction_pool,
+			network,
+			params.keystore_container.sync_keystore(),
+			force_authoring,
+		)?;
+
+		let spawner = task_manager.spawn_handle();
+
+		let params = StartCollatorParams {
+			para_id: id,
+			block_status: client.clone(),
+			announce_block,
+			client: client.clone(),
+			task_manager: &mut task_manager,
+			relay_chain_full_node,
+			spawner,
+			parachain_consensus,
+			import_queue,
+		};
+
+		start_collator(params).await?;
+	} else {
+		let params = StartFullNodeParams {
+			client: client.clone(),
+			announce_block,
+			task_manager: &mut task_manager,
+			para_id: id,
+			relay_chain_full_node,
+		};
+
+		start_full_node(params)?;
+	}
+
+	start_network.start_network();
+
+	Ok((task_manager, client))
+}
+
+/// Build the import queue for the parachain runtime.
+#[allow(clippy::type_complexity)]
+pub fn parachain_build_import_queue(
+	client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ParachainRuntimeExecutor>>>,
+	config: &Configuration,
+	telemetry: Option<TelemetryHandle>,
+	task_manager: &TaskManager,
+) -> Result<
+	sc_consensus::DefaultImportQueue<
+		Block,
+		TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ParachainRuntimeExecutor>>,
+	>,
+	sc_service::Error,
+> {
+	let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
+
+	cumulus_client_consensus_aura::import_queue::<
+		sp_consensus_aura::sr25519::AuthorityPair,
+		_,
+		_,
+		_,
+		_,
+		_,
+		_,
+	>(cumulus_client_consensus_aura::ImportQueueParams {
+		block_import: client.clone(),
+		client: client.clone(),
+		create_inherent_data_providers: move |_, _| async move {
+			let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+			let slot =
+				sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+					*time,
+					slot_duration.slot_duration(),
+				);
+
+			Ok((time, slot))
+		},
+		registry: config.prometheus_registry(),
+		can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
+		spawner: &task_manager.spawn_essential_handle(),
+		telemetry,
+	})
+	.map_err(Into::into)
+}
+
+/// Start a normal parachain node.
+pub async fn start_node(
+	parachain_config: Configuration,
+	polkadot_config: Configuration,
+	id: ParaId,
+) -> sc_service::error::Result<(
+	TaskManager,
+	Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ParachainRuntimeExecutor>>>,
+)> {
+	start_node_impl::<RuntimeApi, ParachainRuntimeExecutor, _, _, _>(
+		parachain_config,
+		polkadot_config,
+		id,
+		|_| Default::default(),
+		parachain_build_import_queue,
+		|client,
+		 prometheus_registry,
+		 telemetry,
+		 task_manager,
+		 relay_chain_node,
+		 transaction_pool,
+		 sync_oracle,
+		 keystore,
+		 force_authoring| {
+			let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
+
+			let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
+				task_manager.spawn_handle(),
+				client.clone(),
+				transaction_pool,
+				prometheus_registry,
+				telemetry.clone(),
+			);
+
+			let relay_chain_backend = relay_chain_node.backend.clone();
+			let relay_chain_client = relay_chain_node.client.clone();
+			Ok(build_aura_consensus::<
+				sp_consensus_aura::sr25519::AuthorityPair,
+				_,
+				_,
+				_,
+				_,
+				_,
+				_,
+				_,
+				_,
+				_,
+			>(BuildAuraConsensusParams {
+				proposer_factory,
+				create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
+					let parachain_inherent =
+						cumulus_primitives_parachain_inherent::ParachainInherentData::create_at_with_client(
+							relay_parent,
+							&relay_chain_client,
+							&*relay_chain_backend,
+							&validation_data,
+							id,
+						);
+					async move {
+						let time = sp_timestamp::InherentDataProvider::from_system_time();
+
+						let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
+							*time,
+							slot_duration.slot_duration(),
+						);
+
+						let parachain_inherent = parachain_inherent.ok_or_else(|| {
+							Box::<dyn std::error::Error + Send + Sync>::from(
+								"Failed to create parachain inherent",
+							)
+						})?;
+						Ok((time, slot, parachain_inherent))
+					}
+				},
+				block_import: client.clone(),
+				relay_chain_client: relay_chain_node.client.clone(),
+				relay_chain_backend: relay_chain_node.backend.clone(),
+				para_client: client,
+				backoff_authoring_blocks: Option::<()>::None,
+				sync_oracle,
+				keystore,
+				force_authoring,
+				slot_duration,
+				// We got around 500ms for proposing
+				block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32),
+				telemetry,
+				max_block_proposal_slot_portion: None,
+			}))
+		},
+	)
+	.await
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml b/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..20ce70aba8f6b22f219005a9337dceb90f44c0ca
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml
@@ -0,0 +1,122 @@
+[package]
+name = "rialto-parachain-runtime"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/parity-bridges-common/"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[build-dependencies]
+substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+[dependencies]
+codec = { package = 'parity-scale-codec', version = '2.0.0', default-features = false, features = ['derive']}
+log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = '1.0', optional = true, features = ['derive'] }
+
+# Bridge dependencies
+
+bp-rialto-parachain = { path = "../../../primitives/chain-rialto-parachain", default-features = false }
+
+# Substrate Dependencies
+## Substrate Primitive Dependencies
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+## Substrate FRAME Dependencies
+frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+## Substrate Pallet Dependencies
+pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+# Cumulus Dependencies
+cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+parachain-info = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false }
+
+# Polkadot Dependencies
+polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+xcm = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+
+[features]
+default = ['std']
+runtime-benchmarks = [
+	'sp-runtime/runtime-benchmarks',
+	'frame-benchmarking',
+	'frame-support/runtime-benchmarks',
+	'frame-system-benchmarking',
+	'frame-system/runtime-benchmarks',
+	'pallet-balances/runtime-benchmarks',
+	'pallet-timestamp/runtime-benchmarks',
+]
+std = [
+	"bp-rialto-parachain/std",
+	"codec/std",
+	"log/std",
+	"scale-info/std",
+	"serde",
+	"sp-api/std",
+	"sp-std/std",
+	"sp-io/std",
+	"sp-core/std",
+	"sp-runtime/std",
+	"sp-version/std",
+	"sp-offchain/std",
+	"sp-session/std",
+	"sp-block-builder/std",
+	"sp-transaction-pool/std",
+	"sp-inherents/std",
+	"frame-support/std",
+	"frame-executive/std",
+	"frame-system/std",
+	"pallet-balances/std",
+	"pallet-randomness-collective-flip/std",
+	"pallet-timestamp/std",
+	"pallet-sudo/std",
+	"pallet-transaction-payment/std",
+	"parachain-info/std",
+	"cumulus-pallet-aura-ext/std",
+	"cumulus-pallet-parachain-system/std",
+	"cumulus-pallet-xcmp-queue/std",
+	"cumulus-pallet-xcm/std",
+	"cumulus-primitives-core/std",
+	"cumulus-primitives-timestamp/std",
+	"cumulus-primitives-utility/std",
+	"xcm/std",
+	"xcm-builder/std",
+	"xcm-executor/std",
+	"pallet-aura/std",
+	"sp-consensus-aura/std",
+]
diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/build.rs b/polkadot/bridges/bin/rialto-parachain/runtime/build.rs
new file mode 100644
index 0000000000000000000000000000000000000000..65095bd1b7e9e002f74bdfafc5c05e2554846ebd
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/runtime/build.rs
@@ -0,0 +1,25 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use substrate_wasm_builder::WasmBuilder;
+
+fn main() {
+	WasmBuilder::new()
+		.with_current_project()
+		.export_heap_base()
+		.import_memory()
+		.build()
+}
diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5b71674b7fe9b91cb34860aff15ed899415f3baf
--- /dev/null
+++ b/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs
@@ -0,0 +1,646 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! The Rialto parachain runtime. This can be compiled with `#[no_std]`, ready for Wasm.
+//!
+//! Originally a copy of runtime from https://github.com/substrate-developer-hub/substrate-parachain-template.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256.
+#![recursion_limit = "256"]
+
+// Make the WASM binary available.
+#[cfg(feature = "std")]
+include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs"));
+
+use sp_api::impl_runtime_apis;
+use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
+use sp_runtime::{
+	create_runtime_str, generic, impl_opaque_keys,
+	traits::{AccountIdLookup, Block as BlockT},
+	transaction_validity::{TransactionSource, TransactionValidity},
+	ApplyExtrinsicResult,
+};
+
+use sp_std::prelude::*;
+#[cfg(feature = "std")]
+use sp_version::NativeVersion;
+use sp_version::RuntimeVersion;
+
+// A few exports that help ease life for downstream crates.
+pub use frame_support::{
+	construct_runtime, match_type, parameter_types,
+	traits::{Everything, IsInVec, Randomness},
+	weights::{
+		constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND},
+		DispatchClass, IdentityFee, Weight,
+	},
+	StorageValue,
+};
+pub use frame_system::Call as SystemCall;
+pub use pallet_balances::Call as BalancesCall;
+pub use pallet_timestamp::Call as TimestampCall;
+pub use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+#[cfg(any(feature = "std", test))]
+pub use sp_runtime::BuildStorage;
+pub use sp_runtime::{MultiAddress, Perbill, Permill};
+
+pub use bp_rialto_parachain::{
+	AccountId, Balance, BlockLength, BlockNumber, BlockWeights, Hash, Hasher as Hashing, Header,
+	Index, Signature, MAXIMUM_BLOCK_WEIGHT,
+};
+
+// Polkadot & XCM imports
+use pallet_xcm::XcmPassthrough;
+use polkadot_parachain::primitives::Sibling;
+use xcm::latest::prelude::*;
+use xcm_builder::{
+	AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, CurrencyAdapter,
+	EnsureXcmOrigin, FixedWeightBounds, IsConcrete, LocationInverter, NativeAsset,
+	ParentAsSuperuser, ParentIsDefault, RelayChainAsNative, SiblingParachainAsNative,
+	SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32,
+	SovereignSignedViaLocation, TakeWeightCredit, UsingComponents,
+};
+use xcm_executor::{Config, XcmExecutor};
+
+/// The address format for describing accounts.
+pub type Address = MultiAddress<AccountId, ()>;
+/// Block type as expected by this runtime.
+pub type Block = generic::Block<Header, UncheckedExtrinsic>;
+/// A Block signed with a Justification
+pub type SignedBlock = generic::SignedBlock<Block>;
+/// BlockId type as expected by this runtime.
+pub type BlockId = generic::BlockId<Block>;
+/// The SignedExtension to the basic transaction logic.
+pub type SignedExtra = (
+	frame_system::CheckSpecVersion<Runtime>,
+	frame_system::CheckGenesis<Runtime>,
+	frame_system::CheckEra<Runtime>,
+	frame_system::CheckNonce<Runtime>,
+	frame_system::CheckWeight<Runtime>,
+	pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
+);
+/// Unchecked extrinsic type as expected by this runtime.
+pub type UncheckedExtrinsic = generic::UncheckedExtrinsic<Address, Call, Signature, SignedExtra>;
+/// Extrinsic type that has already been checked.
+pub type CheckedExtrinsic = generic::CheckedExtrinsic<AccountId, Call, SignedExtra>;
+/// Executive: handles dispatch to the various modules.
+pub type Executive = frame_executive::Executive<
+	Runtime,
+	Block,
+	frame_system::ChainContext<Runtime>,
+	Runtime,
+	AllPallets,
+>;
+
+impl_opaque_keys! {
+	pub struct SessionKeys {
+		pub aura: Aura,
+	}
+}
+
+/// This runtime version.
+#[sp_version::runtime_version]
+pub const VERSION: RuntimeVersion = RuntimeVersion {
+	spec_name: create_runtime_str!("template-parachain"),
+	impl_name: create_runtime_str!("template-parachain"),
+	authoring_version: 1,
+	spec_version: 1,
+	impl_version: 0,
+	apis: RUNTIME_API_VERSIONS,
+	transaction_version: 1,
+};
+
+/// This determines the average expected block time that we are targeting.
+/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`.
+/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
+/// up by `pallet_aura` to implement `fn slot_duration()`.
+///
+/// Change this to adjust the block time.
+pub const MILLISECS_PER_BLOCK: u64 = 12000;
+
+pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
+
+pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES;
+
+// Time is measured by number of blocks.
+pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
+pub const HOURS: BlockNumber = MINUTES * 60;
+pub const DAYS: BlockNumber = HOURS * 24;
+
+// Unit = the base number of indivisible units for balances
+pub const UNIT: Balance = 1_000_000_000_000;
+pub const MILLIUNIT: Balance = 1_000_000_000;
+pub const MICROUNIT: Balance = 1_000_000;
+
+// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks.
+pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
+
+/// The version information used to identify this runtime when compiled natively.
+#[cfg(feature = "std")]
+pub fn native_version() -> NativeVersion {
+	NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
+}
+
+parameter_types! {
+	pub const BlockHashCount: BlockNumber = 250;
+	pub const Version: RuntimeVersion = VERSION;
+	pub const SS58Prefix: u8 = 48;
+}
+
+// Configure FRAME pallets to include in runtime.
+
+impl frame_system::Config for Runtime {
+	/// The identifier used to distinguish between accounts.
+	type AccountId = AccountId;
+	/// The aggregated dispatch type that is available for extrinsics.
+	type Call = Call;
+	/// The lookup mechanism to get account ID from whatever is passed in dispatchers.
+	type Lookup = AccountIdLookup<AccountId, ()>;
+	/// The index type for storing how many extrinsics an account has signed.
+	type Index = Index;
+	/// The index type for blocks.
+	type BlockNumber = BlockNumber;
+	/// The type for hashing blocks and tries.
+	type Hash = Hash;
+	/// The hashing algorithm used.
+	type Hashing = Hashing;
+	/// The header type.
+	type Header = generic::Header<BlockNumber, Hashing>;
+	/// The ubiquitous event type.
+	type Event = Event;
+	/// The ubiquitous origin type.
+	type Origin = Origin;
+	/// Maximum number of block number to block hash mappings to keep (oldest pruned first).
+	type BlockHashCount = BlockHashCount;
+	/// Runtime version.
+	type Version = Version;
+	/// Converts a module to an index of this module in the runtime.
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<Balance>;
+	/// What to do if a new account is created.
+	type OnNewAccount = ();
+	/// What to do if an account is fully reaped from the system.
+	type OnKilledAccount = ();
+	/// The weight of database operations that the runtime can invoke.
+	type DbWeight = ();
+	/// The basic call filter to use in dispatchable.
+	type BaseCallFilter = Everything;
+	/// Weight information for the extrinsics of this pallet.
+	type SystemWeightInfo = ();
+	/// Block & extrinsics weights: base values and limits.
+	type BlockWeights = BlockWeights;
+	/// The maximum length of a block (in bytes).
+	type BlockLength = BlockLength;
+	/// This is used as an identifier of the chain; this runtime uses the prefix 48 (see `SS58Prefix`).
+	type SS58Prefix = SS58Prefix;
+	/// The action to take on a Runtime Upgrade
+	type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode<Self>;
+}
+
+parameter_types! {
+	pub const MinimumPeriod: u64 = SLOT_DURATION / 2;
+}
+
+impl pallet_timestamp::Config for Runtime {
+	/// A timestamp: milliseconds since the Unix epoch.
+	type Moment = u64;
+	type OnTimestampSet = ();
+	type MinimumPeriod = MinimumPeriod;
+	type WeightInfo = ();
+}
+
+parameter_types! {
+	pub const ExistentialDeposit: u128 = MILLIUNIT;
+	pub const TransferFee: u128 = MILLIUNIT;
+	pub const CreationFee: u128 = MILLIUNIT;
+	pub const TransactionByteFee: u128 = MICROUNIT;
+	pub const OperationalFeeMultiplier: u8 = 5;
+	pub const MaxLocks: u32 = 50;
+	pub const MaxReserves: u32 = 50;
+}
+
+impl pallet_balances::Config for Runtime {
+	/// The type for recording an account's balance.
+	type Balance = Balance;
+	/// The ubiquitous event type.
+	type Event = Event;
+	type DustRemoval = ();
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+	type WeightInfo = pallet_balances::weights::SubstrateWeight<Runtime>;
+	type MaxLocks = MaxLocks;
+	type MaxReserves = MaxReserves;
+	type ReserveIdentifier = [u8; 8];
+}
+
+impl pallet_transaction_payment::Config for Runtime {
+	type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter<Balances, ()>;
+	type TransactionByteFee = TransactionByteFee;
+	type OperationalFeeMultiplier = OperationalFeeMultiplier;
+	type WeightToFee = IdentityFee<Balance>;
+	type FeeMultiplierUpdate = ();
+}
+
+impl pallet_sudo::Config for Runtime {
+	type Call = Call;
+	type Event = Event;
+}
+
+parameter_types! {
+	pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4;
+	pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4;
+}
+
+impl cumulus_pallet_parachain_system::Config for Runtime {
+	type Event = Event;
+	type OnValidationData = ();
+	type SelfParaId = parachain_info::Pallet<Runtime>;
+	type OutboundXcmpMessageSource = XcmpQueue;
+	type DmpMessageHandler = DmpQueue;
+	type ReservedDmpWeight = ReservedDmpWeight;
+	type XcmpMessageHandler = XcmpQueue;
+	type ReservedXcmpWeight = ReservedXcmpWeight;
+}
+
+impl parachain_info::Config for Runtime {}
+
+impl cumulus_pallet_aura_ext::Config for Runtime {}
+
+impl pallet_randomness_collective_flip::Config for Runtime {}
+
+parameter_types! {
+	pub const RelayLocation: MultiLocation = MultiLocation::parent();
+	pub const RelayNetwork: NetworkId = NetworkId::Polkadot;
+	pub RelayOrigin: Origin = cumulus_pallet_xcm::Origin::Relay.into();
+	pub Ancestry: MultiLocation = Parachain(ParachainInfo::parachain_id().into()).into();
+}
+
+/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used
+/// when determining ownership of accounts for asset transacting and when attempting to use XCM
+/// `Transact` in order to determine the dispatch Origin.
+pub type LocationToAccountId = (
+	// The parent (Relay-chain) origin converts to the default `AccountId`.
+	ParentIsDefault<AccountId>,
+	// Sibling parachain origins convert to AccountId via the `ParaId::into`.
+	SiblingParachainConvertsVia<Sibling, AccountId>,
+	// Straight up local `AccountId32` origins just alias directly to `AccountId`.
+	AccountId32Aliases<RelayNetwork, AccountId>,
+);
+
+/// Means for transacting assets on this chain.
+pub type LocalAssetTransactor = CurrencyAdapter<
+	// Use this currency:
+	Balances,
+	// Use this currency when it is a fungible asset matching the given location or name:
+	IsConcrete<RelayLocation>,
+	// Do a simple pun to convert an AccountId32 MultiLocation into a native chain account ID:
+	LocationToAccountId,
+	// Our chain's account ID type (we can't get away without mentioning it explicitly):
+	AccountId,
+	// We don't track any teleports.
+	(),
+>;
+
+/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance,
+/// ready for dispatching a transaction with XCM `Transact`. There is an `OriginKind` which can
+/// bias the kind of local `Origin` it will become.
+pub type XcmOriginToTransactDispatchOrigin = (
+	// Sovereign account converter; this attempts to derive an `AccountId` from the origin location
+	// using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for
+	// foreign chains who want to have a local sovereign account on this chain which they control.
+	SovereignSignedViaLocation<LocationToAccountId, Origin>,
+	// Native converter for Relay-chain (Parent) location; will convert to a `Relay` origin when
+	// recognised.
+	RelayChainAsNative<RelayOrigin, Origin>,
+	// Native converter for sibling Parachains; will convert to a `SiblingPara` origin when
+	// recognised.
+	SiblingParachainAsNative<cumulus_pallet_xcm::Origin, Origin>,
+	// Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a
+	// transaction from the Root origin.
+	ParentAsSuperuser<Origin>,
+	// Native signed account converter; this just converts an `AccountId32` origin into a normal
+	// `Origin::Signed` origin of the same 32-byte value.
+	SignedAccountId32AsNative<RelayNetwork, Origin>,
+	// Xcm origins can be represented natively under the Xcm pallet's Xcm origin.
+	XcmPassthrough<Origin>,
+);
+
+parameter_types! {
+	// One XCM operation is 1_000_000 weight - almost certainly a conservative estimate.
+	pub UnitWeightCost: Weight = 1_000_000;
+	// One UNIT buys 1 second of weight.
+	pub const WeightPrice: (MultiLocation, u128) = (MultiLocation::parent(), UNIT);
+	pub const MaxInstructions: u32 = 100;
+	pub const MaxAuthorities: u32 = 100_000;
+}
+
+match_type! {
+	pub type ParentOrParentsUnitPlurality: impl Contains<MultiLocation> = {
+		MultiLocation { parents: 1, interior: Here } |
+		MultiLocation { parents: 1, interior: X1(Plurality { id: BodyId::Unit, .. }) }
+	};
+}
+
+pub type Barrier = (
+	TakeWeightCredit,
+	AllowTopLevelPaidExecutionFrom<Everything>,
+	AllowUnpaidExecutionFrom<ParentOrParentsUnitPlurality>,
+	// ^^^ Parent & its unit plurality gets free execution
+);
+
+pub struct XcmConfig;
+impl Config for XcmConfig {
+	type Call = Call;
+	type XcmSender = XcmRouter;
+	// How to withdraw and deposit an asset.
+	type AssetTransactor = LocalAssetTransactor;
+	type OriginConverter = XcmOriginToTransactDispatchOrigin;
+	type IsReserve = NativeAsset;
+	type IsTeleporter = NativeAsset; // <- should be enough to allow teleportation of UNIT
+	type LocationInverter = LocationInverter<Ancestry>;
+	type Barrier = Barrier;
+	type Weigher = FixedWeightBounds<UnitWeightCost, Call, MaxInstructions>;
+	type Trader = UsingComponents<IdentityFee<Balance>, RelayLocation, AccountId, Balances, ()>;
+	type ResponseHandler = PolkadotXcm;
+	type AssetTrap = PolkadotXcm;
+	type AssetClaims = PolkadotXcm;
+	type SubscriptionService = PolkadotXcm;
+}
+
+/// No local origins on this chain are allowed to dispatch XCM sends/executions.
+pub type LocalOriginToLocation = SignedToAccountId32<Origin, AccountId, RelayNetwork>;
+
+/// The means for routing XCM messages which are not for local execution into the right message
+/// queues.
+pub type XcmRouter = (
+	// Two routers - use UMP to communicate with the relay chain:
+	cumulus_primitives_utility::ParentAsUmp<ParachainSystem, ()>,
+	// ..and XCMP to communicate with the sibling chains.
+	XcmpQueue,
+);
+
+impl pallet_xcm::Config for Runtime {
+	type Event = Event;
+	type SendXcmOrigin = EnsureXcmOrigin<Origin, LocalOriginToLocation>;
+	type XcmRouter = XcmRouter;
+	type ExecuteXcmOrigin = EnsureXcmOrigin<Origin, LocalOriginToLocation>;
+	type XcmExecuteFilter = Everything;
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+	type XcmTeleportFilter = Everything;
+	type XcmReserveTransferFilter = Everything;
+	type Weigher = FixedWeightBounds<UnitWeightCost, Call, MaxInstructions>;
+	type LocationInverter = LocationInverter<Ancestry>;
+	type Origin = Origin;
+	type Call = Call;
+	const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100;
+	type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion;
+}
+
+impl cumulus_pallet_xcm::Config for Runtime {
+	type Event = Event;
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+}
+
+impl cumulus_pallet_xcmp_queue::Config for Runtime {
+	type Event = Event;
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+	type ChannelInfo = ParachainSystem;
+	type VersionWrapper = ();
+}
+
+impl cumulus_pallet_dmp_queue::Config for Runtime {
+	type Event = Event;
+	type XcmExecutor = XcmExecutor<XcmConfig>;
+	type ExecuteOverweightOrigin = frame_system::EnsureRoot<AccountId>;
+}
+
+impl pallet_aura::Config for Runtime {
+	type AuthorityId = AuraId;
+	type DisabledValidators = ();
+	type MaxAuthorities = MaxAuthorities;
+}
+
+// /// Configure the pallet template in pallets/template.
+// impl template::Config for Runtime {
+// 	type Event = Event;
+// }
+
+// Create the runtime by composing the FRAME pallets that were previously configured.
+construct_runtime!(
+	pub enum Runtime where
+		Block = Block,
+		NodeBlock = generic::Block<Header, sp_runtime::OpaqueExtrinsic>,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Storage, Config, Event<T>},
+		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
+		Sudo: pallet_sudo::{Pallet, Call, Storage, Config<T>, Event<T>},
+		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
+		TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
+
+		ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Storage, Inherent, Event<T>} = 20,
+		ParachainInfo: parachain_info::{Pallet, Storage, Config} = 21,
+
+		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>} = 30,
+
+		Aura: pallet_aura::{Pallet, Config<T>},
+		AuraExt: cumulus_pallet_aura_ext::{Pallet, Config},
+
+		// XCM helpers.
+		XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event<T>} = 50,
+		PolkadotXcm: pallet_xcm::{Pallet, Call, Event<T>, Origin} = 51,
+		CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Event<T>, Origin} = 52,
+		DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event<T>} = 53,
+
+		// //Template
+		// TemplatePallet: template::{Pallet, Call, Storage, Event<T>},
+	}
+);
+
+impl_runtime_apis! {
+	impl sp_api::Core<Block> for Runtime {
+		fn version() -> RuntimeVersion {
+			VERSION
+		}
+
+		fn execute_block(block: Block) {
+			Executive::execute_block(block)
+		}
+
+		fn initialize_block(header: &<Block as BlockT>::Header) {
+			Executive::initialize_block(header)
+		}
+	}
+
+	impl sp_api::Metadata<Block> for Runtime {
+		fn metadata() -> OpaqueMetadata {
+			OpaqueMetadata::new(Runtime::metadata().into())
+		}
+	}
+
+	impl sp_block_builder::BlockBuilder<Block> for Runtime {
+		fn apply_extrinsic(
+			extrinsic: <Block as BlockT>::Extrinsic,
+		) -> ApplyExtrinsicResult {
+			Executive::apply_extrinsic(extrinsic)
+		}
+
+		fn finalize_block() -> <Block as BlockT>::Header {
+			Executive::finalize_block()
+		}
+
+		fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<<Block as BlockT>::Extrinsic> {
+			data.create_extrinsics()
+		}
+
+		fn check_inherents(
+			block: Block,
+			data: sp_inherents::InherentData,
+		) -> sp_inherents::CheckInherentsResult {
+			data.check_extrinsics(&block)
+		}
+	}
+
+	impl sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block> for Runtime {
+		fn validate_transaction(
+			source: TransactionSource,
+			tx: <Block as BlockT>::Extrinsic,
+			block_hash: <Block as BlockT>::Hash,
+		) -> TransactionValidity {
+			Executive::validate_transaction(source, tx, block_hash)
+		}
+	}
+
+	impl sp_offchain::OffchainWorkerApi<Block> for Runtime {
+		fn offchain_worker(header: &<Block as BlockT>::Header) {
+			Executive::offchain_worker(header)
+		}
+	}
+
+	impl sp_session::SessionKeys<Block> for Runtime {
+		fn decode_session_keys(
+			encoded: Vec<u8>,
+		) -> Option<Vec<(Vec<u8>, KeyTypeId)>> {
+			SessionKeys::decode_into_raw_public_keys(&encoded)
+		}
+
+		fn generate_session_keys(seed: Option<Vec<u8>>) -> Vec<u8> {
+			SessionKeys::generate(seed)
+		}
+	}
+
+	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
+		fn slot_duration() -> sp_consensus_aura::SlotDuration {
+			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
+		}
+
+		fn authorities() -> Vec<AuraId> {
+			Aura::authorities().to_vec()
+		}
+	}
+
+	impl cumulus_primitives_core::CollectCollationInfo<Block> for Runtime {
+		fn collect_collation_info() -> cumulus_primitives_core::CollationInfo {
+			ParachainSystem::collect_collation_info()
+		}
+	}
+
+	impl frame_system_rpc_runtime_api::AccountNonceApi<Block, AccountId, Index> for Runtime {
+		fn account_nonce(account: AccountId) -> Index {
+			System::account_nonce(account)
+		}
+	}
+
+	impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, Balance> for Runtime {
+		fn query_info(
+			uxt: <Block as BlockT>::Extrinsic,
+			len: u32,
+		) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo<Balance> {
+			TransactionPayment::query_info(uxt, len)
+		}
+		fn query_fee_details(
+			uxt: <Block as BlockT>::Extrinsic,
+			len: u32,
+		) -> pallet_transaction_payment::FeeDetails<Balance> {
+			TransactionPayment::query_fee_details(uxt, len)
+		}
+	}
+
+	#[cfg(feature = "runtime-benchmarks")]
+	impl frame_benchmarking::Benchmark<Block> for Runtime {
+		fn dispatch_benchmark(
+			config: frame_benchmarking::BenchmarkConfig
+		) -> Result<Vec<frame_benchmarking::BenchmarkBatch>, sp_runtime::RuntimeString> {
+			use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey};
+
+			use frame_system_benchmarking::Pallet as SystemBench;
+			impl frame_system_benchmarking::Config for Runtime {}
+
+			let whitelist: Vec<TrackedStorageKey> = vec![
+				// Block Number
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(),
+				// Total Issuance
+				hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(),
+				// Execution Phase
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(),
+				// Event Count
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(),
+				// System Events
+				hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(),
+			];
+
+			let mut batches = Vec::<BenchmarkBatch>::new();
+			let params = (&config, &whitelist);
+
+			add_benchmark!(params, batches, frame_system, SystemBench::<Runtime>);
+			add_benchmark!(params, batches, pallet_balances, Balances);
+			add_benchmark!(params, batches, pallet_timestamp, Timestamp);
+
+			if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) }
+			Ok(batches)
+		}
+	}
+}
+
+struct CheckInherents;
+
+impl cumulus_pallet_parachain_system::CheckInherents<Block> for CheckInherents {
+	fn check_inherents(
+		block: &Block,
+		relay_state_proof: &cumulus_pallet_parachain_system::RelayChainStateProof,
+	) -> sp_inherents::CheckInherentsResult {
+		let relay_chain_slot = relay_state_proof
+			.read_slot()
+			.expect("Could not read the relay chain slot from the proof");
+
+		let inherent_data =
+			cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration(
+				relay_chain_slot,
+				sp_std::time::Duration::from_secs(6),
+			)
+			.create_inherent_data()
+			.expect("Could not create the timestamp inherent data");
+
+		inherent_data.check_extrinsics(block)
+	}
+}
+
+cumulus_pallet_parachain_system::register_validate_block!(
+	Runtime = Runtime,
+	BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::<Runtime, Executive>,
+	CheckInherents = CheckInherents,
+);
diff --git a/polkadot/bridges/bin/rialto/node/Cargo.toml b/polkadot/bridges/bin/rialto/node/Cargo.toml
index 1c9ec8b5bb0abc2ce43eb079753b2dc7a593c365..75be9bcd9fb7dc189d77c2a53fbf0a47f57a9203 100644
--- a/polkadot/bridges/bin/rialto/node/Cargo.toml
+++ b/polkadot/bridges/bin/rialto/node/Cargo.toml
@@ -10,13 +10,17 @@ repository = "https://github.com/paritytech/parity-bridges-common/"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-jsonrpc-core = "15.1.0"
+futures = "0.3"
+jsonrpc-core = "18.0"
+kvdb = "0.10"
+kvdb-rocksdb = "0.12"
+lru = "0.7"
 structopt = "0.3.21"
 serde_json = "1.0.59"
+thiserror = "1.0"
 
 # Bridge dependencies
 
-bp-messages = { path = "../../../primitives/messages" }
 bp-runtime = { path = "../../../primitives/runtime" }
 bp-rialto = { path = "../../../primitives/chain-rialto" }
 pallet-bridge-messages = { path = "../../../modules/messages" }
@@ -24,32 +28,78 @@ rialto-runtime = { path = "../runtime" }
 
 # Substrate Dependencies
 
-
 frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" }
 frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" }
+frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
 node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" }
 pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] }
 sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-consensus-uncles = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
+#sc-finality-grandpa-warp-sync = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" }
 substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
+substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+# Polkadot (parachain) Dependencies
+
+polkadot-approval-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-availability-bitfield-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-availability-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-availability-recovery = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-collator-protocol = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-dispute-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-gossip-support = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-network-bridge = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-collation-generation = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-approval-voting = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-av-store = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-backing = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-bitfield-signing = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-candidate-validation = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-chain-api = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-chain-selection = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-dispute-participation = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-parachains-inherent = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-provisioner = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-pvf = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-runtime-api = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-core-dispute-coordinator = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-network-protocol = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-node-subsystem-util = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-overseer = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-statement-distribution = { git = "https://github.com/paritytech/polkadot", branch = "master" }
 
 [build-dependencies]
 substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs
index 4174cda24487ee4016cce39968a32c94c6987610..3ccfa13e74acb7e0b1709fa8c2d4a0939eef4a1d 100644
--- a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs
+++ b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs
@@ -15,12 +15,15 @@
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
 use bp_rialto::derive_account_from_millau_id;
+use polkadot_primitives::v1::{AssignmentId, ValidatorId};
 use rialto_runtime::{
-	AccountId, AuraConfig, BalancesConfig, BridgeKovanConfig, BridgeRialtoPoaConfig, GenesisConfig, GrandpaConfig,
-	SessionConfig, SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY,
+	AccountId, BabeConfig, BalancesConfig, BridgeKovanConfig, BridgeMillauMessagesConfig,
+	BridgeRialtoPoaConfig, ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig,
+	SessionKeys, Signature, SudoConfig, SystemConfig, WASM_BINARY,
 };
 use serde_json::json;
-use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
+use sp_consensus_babe::AuthorityId as BabeId;
 use sp_core::{sr25519, Pair, Public};
 use sp_finality_grandpa::AuthorityId as GrandpaId;
 use sp_runtime::traits::{IdentifyAccount, Verify};
@@ -56,12 +59,17 @@ where
 	AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
 }
 
-/// Helper function to generate an authority key for Aura
-pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) {
+/// Helper function to generate authority keys.
+pub fn get_authority_keys_from_seed(
+	s: &str,
+) -> (AccountId, BabeId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) {
 	(
 		get_account_id_from_seed::<sr25519::Public>(s),
-		get_from_seed::<AuraId>(s),
+		get_from_seed::<BabeId>(s),
 		get_from_seed::<GrandpaId>(s),
+		get_from_seed::<ValidatorId>(s),
+		get_from_seed::<AssignmentId>(s),
+		get_from_seed::<AuthorityDiscoveryId>(s),
 	)
 }
 
@@ -71,10 +79,7 @@ impl Alternative {
 		let properties = Some(
 			json!({
 				"tokenDecimals": 9,
-				"tokenSymbol": "RLT",
-				"bridgeIds": {
-					"Millau": bp_runtime::MILLAU_CHAIN_ID,
-				}
+				"tokenSymbol": "RLT"
 			})
 			.as_object()
 			.expect("Map given; qed")
@@ -82,8 +87,8 @@ impl Alternative {
 		);
 		match self {
 			Alternative::Development => ChainSpec::from_genesis(
-				"Development",
-				"dev",
+				"Rialto Development",
+				"rialto_dev",
 				sc_service::ChainType::Development,
 				|| {
 					testnet_genesis(
@@ -108,8 +113,8 @@ impl Alternative {
 				None,
 			),
 			Alternative::LocalTestnet => ChainSpec::from_genesis(
-				"Local Testnet",
-				"local_testnet",
+				"Rialto Local",
+				"rialto_local",
 				sc_service::ChainType::Local,
 				|| {
 					testnet_genesis(
@@ -138,10 +143,12 @@ impl Alternative {
 							get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
 							get_account_id_from_seed::<sr25519::Public>("George//stash"),
 							get_account_id_from_seed::<sr25519::Public>("Harry//stash"),
-							pallet_bridge_messages::Pallet::<
-								rialto_runtime::Runtime,
-								pallet_bridge_messages::DefaultInstance,
-							>::relayer_fund_account_id(),
+							get_account_id_from_seed::<sr25519::Public>("MillauMessagesOwner"),
+							get_account_id_from_seed::<sr25519::Public>("WithMillauTokenSwap"),
+							pallet_bridge_messages::relayer_fund_account_id::<
+								bp_rialto::AccountId,
+								bp_rialto::AccountIdConverter,
+							>(),
 							derive_account_from_millau_id(bp_runtime::SourceAccount::Account(
 								get_account_id_from_seed::<sr25519::Public>("Alice"),
 							)),
@@ -174,12 +181,25 @@ impl Alternative {
 	}
 }
 
-fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys {
-	SessionKeys { aura, grandpa }
+fn session_keys(
+	babe: BabeId,
+	grandpa: GrandpaId,
+	para_validator: ValidatorId,
+	para_assignment: AssignmentId,
+	authority_discovery: AuthorityDiscoveryId,
+) -> SessionKeys {
+	SessionKeys { babe, grandpa, para_validator, para_assignment, authority_discovery }
 }
 
 fn testnet_genesis(
-	initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>,
+	initial_authorities: Vec<(
+		AccountId,
+		BabeId,
+		GrandpaId,
+		ValidatorId,
+		AssignmentId,
+		AuthorityDiscoveryId,
+	)>,
 	root_key: AccountId,
 	endowed_accounts: Vec<AccountId>,
 	_enable_println: bool,
@@ -192,21 +212,82 @@ fn testnet_genesis(
 		balances: BalancesConfig {
 			balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(),
 		},
-		aura: AuraConfig {
+		babe: BabeConfig {
 			authorities: Vec::new(),
+			epoch_config: Some(rialto_runtime::BABE_GENESIS_EPOCH_CONFIG),
 		},
 		bridge_rialto_poa: load_rialto_poa_bridge_config(),
 		bridge_kovan: load_kovan_bridge_config(),
-		grandpa: GrandpaConfig {
-			authorities: Vec::new(),
-		},
+		grandpa: GrandpaConfig { authorities: Vec::new() },
 		sudo: SudoConfig { key: root_key },
 		session: SessionConfig {
 			keys: initial_authorities
 				.iter()
-				.map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone())))
+				.map(|x| {
+					(
+						x.0.clone(),
+						x.0.clone(),
+						session_keys(
+							x.1.clone(),
+							x.2.clone(),
+							x.3.clone(),
+							x.4.clone(),
+							x.5.clone(),
+						),
+					)
+				})
 				.collect::<Vec<_>>(),
 		},
+		authority_discovery: Default::default(),
+		hrmp: Default::default(),
+		// this configuration is an exact copy of the configuration from the Polkadot repo
+		// (see /node/service/src/chain_spec.rs:default_parachains_host_configuration)
+		configuration: ConfigurationConfig {
+			config: polkadot_runtime_parachains::configuration::HostConfiguration {
+				validation_upgrade_frequency: 1u32,
+				validation_upgrade_delay: 1,
+				code_retention_period: 1200,
+				max_code_size: polkadot_primitives::v1::MAX_CODE_SIZE,
+				max_pov_size: polkadot_primitives::v1::MAX_POV_SIZE,
+				max_head_data_size: 32 * 1024,
+				group_rotation_frequency: 20,
+				chain_availability_period: 4,
+				thread_availability_period: 4,
+				max_upward_queue_count: 8,
+				max_upward_queue_size: 1024 * 1024,
+				max_downward_message_size: 1024,
+				// this is approximately 4ms.
+				//
+				// Same as `4 * frame_support::weights::WEIGHT_PER_MILLIS`. We don't bother with
+				// an import since that's a made up number and should be replaced with a constant
+				// obtained by benchmarking anyway.
+				ump_service_total_weight: 4 * 1_000_000_000,
+				max_upward_message_size: 1024 * 1024,
+				max_upward_message_num_per_candidate: 5,
+				hrmp_sender_deposit: 0,
+				hrmp_recipient_deposit: 0,
+				hrmp_channel_max_capacity: 8,
+				hrmp_channel_max_total_size: 8 * 1024,
+				hrmp_max_parachain_inbound_channels: 4,
+				hrmp_max_parathread_inbound_channels: 4,
+				hrmp_channel_max_message_size: 1024 * 1024,
+				hrmp_max_parachain_outbound_channels: 4,
+				hrmp_max_parathread_outbound_channels: 4,
+				hrmp_max_message_num_per_candidate: 5,
+				dispute_period: 6,
+				no_show_slots: 2,
+				n_delay_tranches: 25,
+				needed_approvals: 2,
+				relay_vrf_modulo_samples: 2,
+				zeroth_delay_tranche_width: 0,
+				..Default::default()
+			},
+		},
+		paras: Default::default(),
+		bridge_millau_messages: BridgeMillauMessagesConfig {
+			owner: Some(get_account_id_from_seed::<sr25519::Public>("MillauMessagesOwner")),
+			..Default::default()
+		},
 	}
 }
 
@@ -229,9 +310,7 @@ fn load_kovan_bridge_config() -> BridgeKovanConfig {
 #[test]
 fn derived_dave_account_is_as_expected() {
 	let dave = get_account_id_from_seed::<sr25519::Public>("Dave");
-	let derived: AccountId = derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave));
-	assert_eq!(
-		derived.to_string(),
-		"5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string()
-	);
+	let derived: AccountId =
+		derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave));
+	assert_eq!(derived.to_string(), "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string());
 }
diff --git a/polkadot/bridges/bin/rialto/node/src/cli.rs b/polkadot/bridges/bin/rialto/node/src/cli.rs
index 46323ed25c9ed2e39ebc6089b5bfa0e2ad29ddfd..3f85a69a713fe5125f2fe8d402c8fb1d9608b107 100644
--- a/polkadot/bridges/bin/rialto/node/src/cli.rs
+++ b/polkadot/bridges/bin/rialto/node/src/cli.rs
@@ -29,10 +29,10 @@ pub struct Cli {
 /// Possible subcommands of the main binary.
 #[derive(Debug, StructOpt)]
 pub enum Subcommand {
-	/// Key management cli utilities
+	/// Key management CLI utilities
 	Key(sc_cli::KeySubcommand),
 
-	/// Verify a signature for a message, provided on STDIN, with a given (public or secret) key.
+	/// Verify a signature for a message, provided on `STDIN`, with a given (public or secret) key.
 	Verify(sc_cli::VerifyCmd),
 
 	/// Generate a seed that provides a vanity address.
@@ -67,4 +67,19 @@ pub enum Subcommand {
 
 	/// Benchmark runtime pallets.
 	Benchmark(frame_benchmarking_cli::BenchmarkCmd),
+
+	/// FOR INTERNAL USE: analog of the "prepare-worker" command of the polkadot binary.
+	#[structopt(name = "prepare-worker", setting = structopt::clap::AppSettings::Hidden)]
+	PvfPrepareWorker(ValidationWorkerCommand),
+
+	/// FOR INTERNAL USE: analog of the "execute-worker" command of the polkadot binary.
+	#[structopt(name = "execute-worker", setting = structopt::clap::AppSettings::Hidden)]
+	PvfExecuteWorker(ValidationWorkerCommand),
+}
+
+/// Validation worker command.
+#[derive(Debug, StructOpt)]
+pub struct ValidationWorkerCommand {
+	/// The path to the validation host's socket.
+	pub socket_path: String,
 }
diff --git a/polkadot/bridges/bin/rialto/node/src/command.rs b/polkadot/bridges/bin/rialto/node/src/command.rs
index a9930c57417ec0e68cf46f42f6090f18103ec9f8..6f841a9d67f1ded579e053cdf592b684bc3855db 100644
--- a/polkadot/bridges/bin/rialto/node/src/command.rs
+++ b/polkadot/bridges/bin/rialto/node/src/command.rs
@@ -14,9 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{Cli, Subcommand};
-use crate::service;
-use crate::service::new_partial;
+use crate::{
+	cli::{Cli, Subcommand},
+	service::new_partial,
+};
 use rialto_runtime::{Block, RuntimeApi};
 use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli};
 use sc_service::PartialComponents;
@@ -74,19 +75,18 @@ pub fn run() -> sc_cli::Result<()> {
 	));
 
 	match &cli.subcommand {
-		Some(Subcommand::Benchmark(cmd)) => {
+		Some(Subcommand::Benchmark(cmd)) =>
 			if cfg!(feature = "runtime-benchmarks") {
 				let runner = cli.create_runner(cmd)?;
 
-				runner.sync_run(|config| cmd.run::<Block, service::Executor>(config))
+				runner.sync_run(|config| cmd.run::<Block, crate::service::ExecutorDispatch>(config))
 			} else {
 				println!(
 					"Benchmarking wasn't enabled when building the node. \
 				You can enable it with `--features runtime-benchmarks`."
 				);
 				Ok(())
-			}
-		}
+			},
 		Some(Subcommand::Key(cmd)) => cmd.run(&cli),
 		Some(Subcommand::Sign(cmd)) => cmd.run(),
 		Some(Subcommand::Verify(cmd)) => cmd.run(),
@@ -94,79 +94,99 @@ pub fn run() -> sc_cli::Result<()> {
 		Some(Subcommand::BuildSpec(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.chain_spec, config.network))
-		}
+		},
 		Some(Subcommand::CheckBlock(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&config)?;
+			runner.async_run(|mut config| {
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.async_run(|config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&config)?;
+			runner.async_run(|mut config| {
+				let PartialComponents { client, task_manager, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, config.database), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.async_run(|config| {
-				let PartialComponents {
-					client, task_manager, ..
-				} = new_partial(&config)?;
+			runner.async_run(|mut config| {
+				let PartialComponents { client, task_manager, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, config.chain_spec), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					import_queue,
-					..
-				} = new_partial(&config)?;
+			runner.async_run(|mut config| {
+				let PartialComponents { client, task_manager, import_queue, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, import_queue), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::PurgeChain(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| cmd.run(config.database))
-		}
+		},
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.async_run(|config| {
-				let PartialComponents {
-					client,
-					task_manager,
-					backend,
-					..
-				} = new_partial(&config)?;
+			runner.async_run(|mut config| {
+				let PartialComponents { client, task_manager, backend, .. } =
+					new_partial(&mut config).map_err(service_error)?;
 				Ok((cmd.run(client, backend), task_manager))
 			})
-		}
+		},
 		Some(Subcommand::Inspect(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
-			runner.sync_run(|config| cmd.run::<Block, RuntimeApi, service::Executor>(config))
-		}
+			runner.sync_run(|config| {
+				cmd.run::<Block, RuntimeApi, crate::service::ExecutorDispatch>(config)
+			})
+		},
+		Some(Subcommand::PvfPrepareWorker(cmd)) => {
+			let mut builder = sc_cli::LoggerBuilder::new("");
+			builder.with_colors(false);
+			let _ = builder.init();
+
+			polkadot_node_core_pvf::prepare_worker_entrypoint(&cmd.socket_path);
+			Ok(())
+		},
+		Some(crate::cli::Subcommand::PvfExecuteWorker(cmd)) => {
+			let mut builder = sc_cli::LoggerBuilder::new("");
+			builder.with_colors(false);
+			let _ = builder.init();
+
+			polkadot_node_core_pvf::execute_worker_entrypoint(&cmd.socket_path);
+			Ok(())
+		},
 		None => {
 			let runner = cli.create_runner(&cli.run)?;
-			runner
-				.run_node_until_exit(|config| async move {
-					match config.role {
-						Role::Light => service::new_light(config),
-						_ => service::new_full(config),
-					}
-				})
-				.map_err(sc_cli::Error::Service)
-		}
+
+			// some parameters that are used by polkadot nodes, but that are not used by our binary
+			// let jaeger_agent = None;
+			// let grandpa_pause = None;
+			// let no_beefy = true;
+			// let telemetry_worker_handler = None;
+			// let is_collator = crate::service::IsCollator::No;
+			let overseer_gen = crate::overseer::RealOverseerGen;
+			runner.run_node_until_exit(|config| async move {
+				match config.role {
+					Role::Light => Err(sc_cli::Error::Service(sc_service::Error::Other(
+						"Light client is not supported by this node".into(),
+					))),
+					_ => crate::service::build_full(config, overseer_gen)
+						.map(|full| full.task_manager)
+						.map_err(service_error),
+				}
+			})
+		},
 	}
 }
+
// To ease future updates, we don't want to change 'service.rs' too much: it keeps using
+// its own error enum like original polkadot service does.
+fn service_error(err: crate::service::Error) -> sc_cli::Error {
+	sc_cli::Error::Application(Box::new(err))
+}
diff --git a/polkadot/bridges/bin/rialto/node/src/main.rs b/polkadot/bridges/bin/rialto/node/src/main.rs
index f319d1437a98fa41f19e8bd39266eef74787d3fb..824814224e548418d402e542eec8e33755b949e4 100644
--- a/polkadot/bridges/bin/rialto/node/src/main.rs
+++ b/polkadot/bridges/bin/rialto/node/src/main.rs
@@ -23,6 +23,8 @@ mod chain_spec;
 mod service;
 mod cli;
 mod command;
+mod overseer;
+mod parachains_db;
 
 /// Run the Rialto Node
 fn main() -> sc_cli::Result<()> {
diff --git a/polkadot/bridges/bin/rialto/node/src/overseer.rs b/polkadot/bridges/bin/rialto/node/src/overseer.rs
new file mode 100644
index 0000000000000000000000000000000000000000..17f7edce2a319c9122fbc49afad1670efaf9b827
--- /dev/null
+++ b/polkadot/bridges/bin/rialto/node/src/overseer.rs
@@ -0,0 +1,319 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! This is almost 1:1 copy of `node/service/src/overseer.rs` file from Polkadot repository.
+//! The only exception is that we don't support db upgrades => no `upgrade.rs` module.
+
+// this warning comes from `polkadot_overseer::AllSubsystems` type
+#![allow(clippy::type_complexity)]
+
+use crate::service::{AuthorityDiscoveryApi, Error};
+use rialto_runtime::{opaque::Block, Hash};
+
+use lru::LruCache;
+use polkadot_availability_distribution::IncomingRequestReceivers;
+use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;
+use polkadot_node_core_av_store::Config as AvailabilityConfig;
+use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig;
+use polkadot_node_core_chain_selection::Config as ChainSelectionConfig;
+use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
+use polkadot_node_network_protocol::request_response::{v1 as request_v1, IncomingRequestReceiver};
+use polkadot_overseer::{
+	metrics::Metrics as OverseerMetrics, BlockInfo, MetricsTrait, Overseer, OverseerBuilder,
+	OverseerConnector, OverseerHandle,
+};
+use polkadot_primitives::v1::ParachainHost;
+use sc_authority_discovery::Service as AuthorityDiscoveryService;
+use sc_client_api::AuxStore;
+use sc_keystore::LocalKeystore;
+use sp_api::ProvideRuntimeApi;
+use sp_blockchain::HeaderBackend;
+use sp_consensus_babe::BabeApi;
+use sp_core::traits::SpawnNamed;
+use std::sync::Arc;
+use substrate_prometheus_endpoint::Registry;
+
+pub use polkadot_approval_distribution::ApprovalDistribution as ApprovalDistributionSubsystem;
+pub use polkadot_availability_bitfield_distribution::BitfieldDistribution as BitfieldDistributionSubsystem;
+pub use polkadot_availability_distribution::AvailabilityDistributionSubsystem;
+pub use polkadot_availability_recovery::AvailabilityRecoverySubsystem;
+pub use polkadot_collator_protocol::{CollatorProtocolSubsystem, ProtocolSide};
+pub use polkadot_dispute_distribution::DisputeDistributionSubsystem;
+pub use polkadot_gossip_support::GossipSupport as GossipSupportSubsystem;
+pub use polkadot_network_bridge::NetworkBridge as NetworkBridgeSubsystem;
+pub use polkadot_node_collation_generation::CollationGenerationSubsystem;
+pub use polkadot_node_core_approval_voting::ApprovalVotingSubsystem;
+pub use polkadot_node_core_av_store::AvailabilityStoreSubsystem;
+pub use polkadot_node_core_backing::CandidateBackingSubsystem;
+pub use polkadot_node_core_bitfield_signing::BitfieldSigningSubsystem;
+pub use polkadot_node_core_candidate_validation::CandidateValidationSubsystem;
+pub use polkadot_node_core_chain_api::ChainApiSubsystem;
+pub use polkadot_node_core_chain_selection::ChainSelectionSubsystem;
+pub use polkadot_node_core_dispute_coordinator::DisputeCoordinatorSubsystem;
+pub use polkadot_node_core_dispute_participation::DisputeParticipationSubsystem;
+pub use polkadot_node_core_provisioner::ProvisioningSubsystem as ProvisionerSubsystem;
+pub use polkadot_node_core_runtime_api::RuntimeApiSubsystem;
+pub use polkadot_statement_distribution::StatementDistribution as StatementDistributionSubsystem;
+
+/// Arguments passed for overseer construction.
+pub struct OverseerGenArgs<'a, Spawner, RuntimeClient>
+where
+	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
+	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+	Spawner: 'static + SpawnNamed + Clone + Unpin,
+{
+	/// Set of initial relay chain leaves to track.
+	pub leaves: Vec<BlockInfo>,
+	/// The keystore to use for i.e. validator keys.
+	pub keystore: Arc<LocalKeystore>,
+	/// Runtime client generic, providing the `ProvieRuntimeApi` trait besides others.
+	pub runtime_client: Arc<RuntimeClient>,
+	/// The underlying key value store for the parachains.
+	pub parachains_db: Arc<dyn kvdb::KeyValueDB>,
+	/// Underlying network service implementation.
+	pub network_service: Arc<sc_network::NetworkService<Block, Hash>>,
+	/// Underlying authority discovery service.
+	pub authority_discovery_service: AuthorityDiscoveryService,
+	/// POV request receiver
+	pub pov_req_receiver: IncomingRequestReceiver<request_v1::PoVFetchingRequest>,
+	pub chunk_req_receiver: IncomingRequestReceiver<request_v1::ChunkFetchingRequest>,
+	pub collation_req_receiver: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
+	pub available_data_req_receiver:
+		IncomingRequestReceiver<request_v1::AvailableDataFetchingRequest>,
+	pub statement_req_receiver: IncomingRequestReceiver<request_v1::StatementFetchingRequest>,
+	pub dispute_req_receiver: IncomingRequestReceiver<request_v1::DisputeRequest>,
+	/// Prometheus registry, commonly used for production systems, less so for test.
+	pub registry: Option<&'a Registry>,
+	/// Task spawner to be used throughout the overseer and the APIs it provides.
+	pub spawner: Spawner,
+	/// Configuration for the approval voting subsystem.
+	pub approval_voting_config: ApprovalVotingConfig,
+	/// Configuration for the availability store subsystem.
+	pub availability_config: AvailabilityConfig,
+	/// Configuration for the candidate validation subsystem.
+	pub candidate_validation_config: CandidateValidationConfig,
+	/// Configuration for the chain selection subsystem.
+	pub chain_selection_config: ChainSelectionConfig,
+	/// Configuration for the dispute coordinator subsystem.
+	pub dispute_coordinator_config: DisputeCoordinatorConfig,
+}
+
+/// Obtain a prepared `OverseerBuilder`, that is initialized
+/// with all default values.
+pub fn prepared_overseer_builder<Spawner, RuntimeClient>(
+	OverseerGenArgs {
+		leaves,
+		keystore,
+		runtime_client,
+		parachains_db,
+		network_service,
+		authority_discovery_service,
+		pov_req_receiver,
+		chunk_req_receiver,
+		collation_req_receiver: _,
+		available_data_req_receiver,
+		statement_req_receiver,
+		dispute_req_receiver,
+		registry,
+		spawner,
+		approval_voting_config,
+		availability_config,
+		candidate_validation_config,
+		chain_selection_config,
+		dispute_coordinator_config,
+	}: OverseerGenArgs<'_, Spawner, RuntimeClient>,
+) -> Result<
+	OverseerBuilder<
+		Spawner,
+		Arc<RuntimeClient>,
+		CandidateValidationSubsystem,
+		CandidateBackingSubsystem<Spawner>,
+		StatementDistributionSubsystem,
+		AvailabilityDistributionSubsystem,
+		AvailabilityRecoverySubsystem,
+		BitfieldSigningSubsystem<Spawner>,
+		BitfieldDistributionSubsystem,
+		ProvisionerSubsystem<Spawner>,
+		RuntimeApiSubsystem<RuntimeClient>,
+		AvailabilityStoreSubsystem,
+		NetworkBridgeSubsystem<
+			Arc<sc_network::NetworkService<Block, Hash>>,
+			AuthorityDiscoveryService,
+		>,
+		ChainApiSubsystem<RuntimeClient>,
+		CollationGenerationSubsystem,
+		CollatorProtocolSubsystem,
+		ApprovalDistributionSubsystem,
+		ApprovalVotingSubsystem,
+		GossipSupportSubsystem<AuthorityDiscoveryService>,
+		DisputeCoordinatorSubsystem,
+		DisputeParticipationSubsystem,
+		DisputeDistributionSubsystem<AuthorityDiscoveryService>,
+		ChainSelectionSubsystem,
+	>,
+	Error,
+>
+where
+	RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
+	RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+	Spawner: 'static + SpawnNamed + Clone + Unpin,
+{
+	use polkadot_node_subsystem_util::metrics::Metrics;
+	use std::iter::FromIterator;
+
+	let metrics = <OverseerMetrics as MetricsTrait>::register(registry)?;
+
+	let builder = Overseer::builder()
+		.availability_distribution(AvailabilityDistributionSubsystem::new(
+			keystore.clone(),
+			IncomingRequestReceivers { pov_req_receiver, chunk_req_receiver },
+			Metrics::register(registry)?,
+		))
+		.availability_recovery(AvailabilityRecoverySubsystem::with_chunks_only(
+			available_data_req_receiver,
+			Metrics::register(registry)?,
+		))
+		.availability_store(AvailabilityStoreSubsystem::new(
+			parachains_db.clone(),
+			availability_config,
+			Metrics::register(registry)?,
+		))
+		.bitfield_distribution(BitfieldDistributionSubsystem::new(Metrics::register(registry)?))
+		.bitfield_signing(BitfieldSigningSubsystem::new(
+			spawner.clone(),
+			keystore.clone(),
+			Metrics::register(registry)?,
+		))
+		.candidate_backing(CandidateBackingSubsystem::new(
+			spawner.clone(),
+			keystore.clone(),
+			Metrics::register(registry)?,
+		))
+		.candidate_validation(CandidateValidationSubsystem::with_config(
+			candidate_validation_config,
+			Metrics::register(registry)?, // candidate-validation metrics
+			Metrics::register(registry)?, // validation host metrics
+		))
+		.chain_api(ChainApiSubsystem::new(runtime_client.clone(), Metrics::register(registry)?))
+		.collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?))
+		.collator_protocol(CollatorProtocolSubsystem::new(ProtocolSide::Validator {
+			keystore: keystore.clone(),
+			eviction_policy: Default::default(),
+			metrics: Metrics::register(registry)?,
+		}))
+		.network_bridge(NetworkBridgeSubsystem::new(
+			network_service.clone(),
+			authority_discovery_service.clone(),
+			Box::new(network_service.clone()),
+			Metrics::register(registry)?,
+		))
+		.provisioner(ProvisionerSubsystem::new(spawner.clone(), (), Metrics::register(registry)?))
+		.runtime_api(RuntimeApiSubsystem::new(
+			runtime_client.clone(),
+			Metrics::register(registry)?,
+			spawner.clone(),
+		))
+		.statement_distribution(StatementDistributionSubsystem::new(
+			keystore.clone(),
+			statement_req_receiver,
+			Metrics::register(registry)?,
+		))
+		.approval_distribution(ApprovalDistributionSubsystem::new(Metrics::register(registry)?))
+		.approval_voting(ApprovalVotingSubsystem::with_config(
+			approval_voting_config,
+			parachains_db.clone(),
+			keystore.clone(),
+			Box::new(network_service),
+			Metrics::register(registry)?,
+		))
+		.gossip_support(GossipSupportSubsystem::new(
+			keystore.clone(),
+			authority_discovery_service.clone(),
+		))
+		.dispute_coordinator(DisputeCoordinatorSubsystem::new(
+			parachains_db.clone(),
+			dispute_coordinator_config,
+			keystore.clone(),
+			Metrics::register(registry)?,
+		))
+		.dispute_participation(DisputeParticipationSubsystem::new())
+		.dispute_distribution(DisputeDistributionSubsystem::new(
+			keystore,
+			dispute_req_receiver,
+			authority_discovery_service,
+			Metrics::register(registry)?,
+		))
+		.chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db))
+		.leaves(Vec::from_iter(
+			leaves
+				.into_iter()
+				.map(|BlockInfo { hash, parent_hash: _, number }| (hash, number)),
+		))
+		.activation_external_listeners(Default::default())
+		.span_per_active_leaf(Default::default())
+		.active_leaves(Default::default())
+		.supports_parachains(runtime_client)
+		.known_leaves(LruCache::new(KNOWN_LEAVES_CACHE_SIZE))
+		.metrics(metrics)
+		.spawner(spawner);
+	Ok(builder)
+}
+
+/// Trait for the `fn` generating the overseer.
+///
+/// Default behavior is to create an unmodified overseer, as `RealOverseerGen`
+/// would do.
+pub trait OverseerGen {
+	/// Overwrite the full generation of the overseer, including the subsystems.
+	fn generate<Spawner, RuntimeClient>(
+		&self,
+		connector: OverseerConnector,
+		args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
+	) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandle), Error>
+	where
+		RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
+		RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+		Spawner: 'static + SpawnNamed + Clone + Unpin,
+	{
+		let gen = RealOverseerGen;
+		RealOverseerGen::generate::<Spawner, RuntimeClient>(&gen, connector, args)
+	}
+	// It would be nice to make `create_subsystems` part of this trait,
+	// but the amount of generic arguments that would be required as
+	// as consequence make this rather annoying to implement and use.
+}
+
+use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE;
+
+/// The regular set of subsystems.
+pub struct RealOverseerGen;
+
+impl OverseerGen for RealOverseerGen {
+	fn generate<Spawner, RuntimeClient>(
+		&self,
+		connector: OverseerConnector,
+		args: OverseerGenArgs<'_, Spawner, RuntimeClient>,
+	) -> Result<(Overseer<Spawner, Arc<RuntimeClient>>, OverseerHandle), Error>
+	where
+		RuntimeClient: 'static + ProvideRuntimeApi<Block> + HeaderBackend<Block> + AuxStore,
+		RuntimeClient::Api: ParachainHost<Block> + BabeApi<Block> + AuthorityDiscoveryApi<Block>,
+		Spawner: 'static + SpawnNamed + Clone + Unpin,
+	{
+		prepared_overseer_builder(args)?
+			.build_with_connector(connector)
+			.map_err(|e| e.into())
+	}
+}
diff --git a/polkadot/bridges/bin/rialto/node/src/parachains_db.rs b/polkadot/bridges/bin/rialto/node/src/parachains_db.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bf2052043c98797e5f2e594b75ada58397f4d109
--- /dev/null
+++ b/polkadot/bridges/bin/rialto/node/src/parachains_db.rs
@@ -0,0 +1,104 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! This is almost 1:1 copy of `node/service/parachains_db/mod.rs` file from Polkadot repository.
+//! The only exception is that we don't support db upgrades => no `upgrade.rs` module.
+
+use kvdb::KeyValueDB;
+use std::{io, path::PathBuf, sync::Arc};
+
+mod columns {
+	pub const NUM_COLUMNS: u32 = 5;
+
+	pub const COL_AVAILABILITY_DATA: u32 = 0;
+	pub const COL_AVAILABILITY_META: u32 = 1;
+	pub const COL_APPROVAL_DATA: u32 = 2;
+	pub const COL_CHAIN_SELECTION_DATA: u32 = 3;
+	pub const COL_DISPUTE_COORDINATOR_DATA: u32 = 4;
+}
+
+/// Columns used by different subsystems.
+#[derive(Debug, Clone)]
+pub struct ColumnsConfig {
+	/// The column used by the av-store for data.
+	pub col_availability_data: u32,
+	/// The column used by the av-store for meta information.
+	pub col_availability_meta: u32,
+	/// The column used by approval voting for data.
+	pub col_approval_data: u32,
+	/// The column used by chain selection for data.
+	pub col_chain_selection_data: u32,
+	/// The column used by dispute coordinator for data.
+	pub col_dispute_coordinator_data: u32,
+}
+
+/// The real columns used by the parachains DB.
+pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig {
+	col_availability_data: columns::COL_AVAILABILITY_DATA,
+	col_availability_meta: columns::COL_AVAILABILITY_META,
+	col_approval_data: columns::COL_APPROVAL_DATA,
+	col_chain_selection_data: columns::COL_CHAIN_SELECTION_DATA,
+	col_dispute_coordinator_data: columns::COL_DISPUTE_COORDINATOR_DATA,
+};
+
+/// The cache size for each column, in megabytes.
+#[derive(Debug, Clone)]
+pub struct CacheSizes {
+	/// Cache used by availability data.
+	pub availability_data: usize,
+	/// Cache used by availability meta.
+	pub availability_meta: usize,
+	/// Cache used by approval data.
+	pub approval_data: usize,
+}
+
+impl Default for CacheSizes {
+	fn default() -> Self {
+		CacheSizes { availability_data: 25, availability_meta: 1, approval_data: 5 }
+	}
+}
+
+fn other_io_error(err: String) -> io::Error {
+	io::Error::new(io::ErrorKind::Other, err)
+}
+
+/// Open the database on disk, creating it if it doesn't exist.
+pub fn open_creating(root: PathBuf, cache_sizes: CacheSizes) -> io::Result<Arc<dyn KeyValueDB>> {
+	use kvdb_rocksdb::{Database, DatabaseConfig};
+
+	let path = root.join("parachains").join("db");
+
+	let mut db_config = DatabaseConfig::with_columns(columns::NUM_COLUMNS);
+
+	let _ = db_config
+		.memory_budget
+		.insert(columns::COL_AVAILABILITY_DATA, cache_sizes.availability_data);
+	let _ = db_config
+		.memory_budget
+		.insert(columns::COL_AVAILABILITY_META, cache_sizes.availability_meta);
+	let _ = db_config
+		.memory_budget
+		.insert(columns::COL_APPROVAL_DATA, cache_sizes.approval_data);
+
+	let path_str = path
+		.to_str()
+		.ok_or_else(|| other_io_error(format!("Bad database path: {:?}", path)))?;
+
+	std::fs::create_dir_all(&path_str)?;
+	let db = Database::open(&db_config, path_str)?;
+
+	Ok(Arc::new(db))
+}
diff --git a/polkadot/bridges/bin/rialto/node/src/service.rs b/polkadot/bridges/bin/rialto/node/src/service.rs
index e29ff2576509dd5df3a9706e95d4a4b8c2ed8c0d..e2e811eaa67f4c796591037a709c0ce9d60b63f4 100644
--- a/polkadot/bridges/bin/rialto/node/src/service.rs
+++ b/polkadot/bridges/bin/rialto/node/src/service.rs
@@ -14,33 +14,47 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
-// =====================================================================================
-// =====================================================================================
-// =====================================================================================
-// UPDATE GUIDE:
-// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo);
-// 2) the only thing to keep from old code, is `rpc_extensions_builder` - we use our own custom RPCs;
-// 3) fix compilation errors;
-// 4) test :)
-// =====================================================================================
-// =====================================================================================
-// =====================================================================================
-
-//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.
-
+//! Rialto chain node service.
+//!
+//! The code is mostly copy of `service/src/lib.rs` file from Polkadot repository
+//! without optional functions.
+
+// this warning comes from Error enum (sc_cli::Error in particular) && it isn't easy to use box
+// there
+#![allow(clippy::large_enum_variant)]
+// this warning comes from `sc_service::PartialComponents` type
+#![allow(clippy::type_complexity)]
+
+use crate::overseer::{OverseerGen, OverseerGenArgs};
+
+use polkadot_node_core_approval_voting::Config as ApprovalVotingConfig;
+use polkadot_node_core_av_store::Config as AvailabilityConfig;
+use polkadot_node_core_candidate_validation::Config as CandidateValidationConfig;
+use polkadot_node_core_chain_selection::Config as ChainSelectionConfig;
+use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig;
+use polkadot_node_network_protocol::request_response::IncomingRequest;
+use polkadot_overseer::{BlockInfo, OverseerConnector};
+use polkadot_primitives::v1::BlockId;
 use rialto_runtime::{self, opaque::Block, RuntimeApi};
-use sc_client_api::{ExecutorProvider, RemoteBackend};
-use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams};
-pub use sc_executor::NativeElseWasmExecutor;
-
-use sc_keystore::LocalKeystore;
-use sc_service::{error::Error as ServiceError, Configuration, TaskManager};
+use sc_client_api::ExecutorProvider;
+use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch};
+use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider;
+use sc_service::{config::PrometheusConfig, Configuration, TaskManager};
 use sc_telemetry::{Telemetry, TelemetryWorker};
-use sp_consensus::SlotData;
-use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;
+use sp_api::{ConstructRuntimeApi, HeaderT};
+use sp_consensus::SelectChain;
+use sp_runtime::traits::{BlakeTwo256, Block as BlockT};
 use std::{sync::Arc, time::Duration};
+use substrate_prometheus_endpoint::Registry;
+
+pub use polkadot_overseer::Handle;
+pub use polkadot_primitives::v1::ParachainHost;
+pub use sc_client_api::AuxStore;
+pub use sp_authority_discovery::AuthorityDiscoveryApi;
+pub use sp_blockchain::HeaderBackend;
+pub use sp_consensus_babe::BabeApi;
+
+pub type Executor = NativeElseWasmExecutor<ExecutorDispatch>;
 
 // Our native executor instance.
 pub struct ExecutorDispatch;
@@ -57,31 +71,134 @@ impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
 	}
 }
 
-type FullClient = sc_service::TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+	#[error(transparent)]
+	Io(#[from] std::io::Error),
+
+	#[error(transparent)]
+	Cli(#[from] sc_cli::Error),
+
+	#[error(transparent)]
+	Blockchain(#[from] sp_blockchain::Error),
+
+	#[error(transparent)]
+	Consensus(#[from] sp_consensus::Error),
+
+	#[error(transparent)]
+	Service(#[from] sc_service::Error),
+
+	#[error(transparent)]
+	Telemetry(#[from] sc_telemetry::Error),
+
+	#[error("Failed to create an overseer")]
+	Overseer(#[from] polkadot_overseer::SubsystemError),
+
+	#[error(transparent)]
+	Prometheus(#[from] substrate_prometheus_endpoint::PrometheusError),
+
+	#[error("Authorities require the real overseer implementation")]
+	AuthoritiesRequireRealOverseer,
+
+	#[error("Creating a custom database is required for validators")]
+	DatabasePathRequired,
+}
+
+type FullClient = sc_service::TFullClient<Block, RuntimeApi, Executor>;
 type FullBackend = sc_service::TFullBackend<Block>;
 type FullSelectChain = sc_consensus::LongestChain<FullBackend, Block>;
+type FullGrandpaBlockImport =
+	sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>;
+type FullTransactionPool = sc_transaction_pool::FullPool<Block, FullClient>;
+type FullBabeBlockImport =
+	sc_consensus_babe::BabeBlockImport<Block, FullClient, FullGrandpaBlockImport>;
+type FullBabeLink = sc_consensus_babe::BabeLink<Block>;
+type FullGrandpaLink = sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>;
+
+/// A set of APIs that polkadot-like runtimes must implement.
+///
+/// This is the copy of `polkadot_service::RuntimeApiCollection` with some APIs removed
+/// (right now - MMR and BEEFY).
+pub trait RequiredApiCollection:
+	sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+	+ sp_api::ApiExt<Block>
+	+ sp_consensus_babe::BabeApi<Block>
+	+ sp_finality_grandpa::GrandpaApi<Block>
+	+ polkadot_primitives::v1::ParachainHost<Block>
+	+ sp_block_builder::BlockBuilder<Block>
+	+ frame_system_rpc_runtime_api::AccountNonceApi<
+		Block,
+		bp_rialto::AccountId,
+		rialto_runtime::Index,
+	> + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, bp_rialto::Balance>
+	+ sp_api::Metadata<Block>
+	+ sp_offchain::OffchainWorkerApi<Block>
+	+ sp_session::SessionKeys<Block>
+	+ sp_authority_discovery::AuthorityDiscoveryApi<Block>
+where
+	<Self as sp_api::ApiExt<Block>>::StateBackend: sp_api::StateBackend<BlakeTwo256>,
+{
+}
+
+impl<Api> RequiredApiCollection for Api
+where
+	Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
+		+ sp_api::ApiExt<Block>
+		+ sp_consensus_babe::BabeApi<Block>
+		+ sp_finality_grandpa::GrandpaApi<Block>
+		+ polkadot_primitives::v1::ParachainHost<Block>
+		+ sp_block_builder::BlockBuilder<Block>
+		+ frame_system_rpc_runtime_api::AccountNonceApi<
+			Block,
+			bp_rialto::AccountId,
+			rialto_runtime::Index,
+		> + pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<Block, bp_rialto::Balance>
+		+ sp_api::Metadata<Block>
+		+ sp_offchain::OffchainWorkerApi<Block>
+		+ sp_session::SessionKeys<Block>
+		+ sp_authority_discovery::AuthorityDiscoveryApi<Block>,
+	<Self as sp_api::ApiExt<Block>>::StateBackend: sp_api::StateBackend<BlakeTwo256>,
+{
+}
+
+// If we're using prometheus, use a registry with a prefix of `polkadot`.
+fn set_prometheus_registry(config: &mut Configuration) -> Result<(), Error> {
+	if let Some(PrometheusConfig { registry, .. }) = config.prometheus_config.as_mut() {
+		*registry = Registry::new_custom(Some("polkadot".into()), None)?;
+	}
+
+	Ok(())
+}
 
-#[allow(clippy::type_complexity)]
 pub fn new_partial(
-	config: &Configuration,
+	config: &mut Configuration,
 ) -> Result<
 	sc_service::PartialComponents<
 		FullClient,
 		FullBackend,
 		FullSelectChain,
 		sc_consensus::DefaultImportQueue<Block, FullClient>,
-		sc_transaction_pool::FullPool<Block, FullClient>,
+		FullTransactionPool,
 		(
-			sc_finality_grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, FullSelectChain>,
-			sc_finality_grandpa::LinkHalf<Block, FullClient, FullSelectChain>,
+			impl Fn(
+				sc_rpc::DenyUnsafe,
+				sc_rpc::SubscriptionTaskExecutor,
+			) -> Result<jsonrpc_core::IoHandler<sc_service::RpcMetadata>, sc_service::Error>,
+			(FullBabeBlockImport, FullGrandpaLink, FullBabeLink),
+			sc_finality_grandpa::SharedVoterState,
+			std::time::Duration,
 			Option<Telemetry>,
 		),
 	>,
-	ServiceError,
-> {
-	if config.keystore_remote.is_some() {
-		return Err(ServiceError::Other("Remote Keystores are not supported.".to_string()));
-	}
+	Error,
+>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, FullClient> + Send + Sync + 'static,
+	<RuntimeApi as ConstructRuntimeApi<Block, FullClient>>::RuntimeApi:
+		RequiredApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
+	ExecutorDispatch: NativeExecutionDispatch + 'static,
+{
+	set_prometheus_registry(config)?;
 
 	let telemetry = config
 		.telemetry_endpoints
@@ -94,10 +211,18 @@ pub fn new_partial(
 		})
 		.transpose()?;
 
-	let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
-		config,
-		telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-	)?;
+	let executor = NativeElseWasmExecutor::<ExecutorDispatch>::new(
+		config.wasm_method,
+		config.default_heap_pages,
+		config.max_runtime_instances,
+	);
+
+	let (client, backend, keystore_container, task_manager) =
+		sc_service::new_full_parts::<Block, RuntimeApi, Executor>(
+			config,
+			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+			executor,
+		)?;
 	let client = Arc::new(client);
 
 	let telemetry = telemetry.map(|(worker, telemetry)| {
@@ -115,166 +240,415 @@ pub fn new_partial(
 		client.clone(),
 	);
 
-	let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
+	let (grandpa_block_import, grandpa_link) =
+		sc_finality_grandpa::block_import_with_authority_set_hard_forks(
+			client.clone(),
+			&(client.clone() as Arc<_>),
+			select_chain.clone(),
+			Vec::new(),
+			telemetry.as_ref().map(|x| x.handle()),
+		)?;
+	let justification_import = grandpa_block_import.clone();
+
+	let babe_config = sc_consensus_babe::Config::get_or_compute(&*client)?;
+	let (block_import, babe_link) =
+		sc_consensus_babe::block_import(babe_config.clone(), grandpa_block_import, client.clone())?;
+
+	let slot_duration = babe_link.config().slot_duration();
+	let import_queue = sc_consensus_babe::import_queue(
+		babe_link.clone(),
+		block_import.clone(),
+		Some(Box::new(justification_import)),
 		client.clone(),
-		&(client.clone() as Arc<_>),
 		select_chain.clone(),
+		move |_, ()| async move {
+			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+			let slot =
+				sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
+					*timestamp,
+					slot_duration,
+				);
+
+			Ok((timestamp, slot))
+		},
+		&task_manager.spawn_essential_handle(),
+		config.prometheus_registry(),
+		sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
 		telemetry.as_ref().map(|x| x.handle()),
 	)?;
 
-	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
+	let justification_stream = grandpa_link.justification_stream();
+	let shared_authority_set = grandpa_link.shared_authority_set().clone();
+	let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty();
 
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
-		block_import: grandpa_block_import.clone(),
-		justification_import: Some(Box::new(grandpa_block_import.clone())),
-		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+	let import_setup = (block_import, grandpa_link, babe_link);
+	let rpc_setup = shared_voter_state.clone();
+
+	let slot_duration = babe_config.slot_duration();
 
-			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
+	let rpc_extensions_builder = {
+		let client = client.clone();
+		let transaction_pool = transaction_pool.clone();
+		let backend = backend.clone();
+
+		move |deny_unsafe,
+		      subscription_executor: sc_rpc::SubscriptionTaskExecutor|
+		      -> Result<jsonrpc_core::IoHandler<sc_service::RpcMetadata>, sc_service::Error> {
+			use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi};
+			use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler};
+			use substrate_frame_rpc_system::{FullSystem, SystemApi};
+
+			let backend = backend.clone();
+			let client = client.clone();
+			let pool = transaction_pool.clone();
+
+			let shared_voter_state = shared_voter_state.clone();
+
+			let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service(
+				backend,
+				Some(shared_authority_set.clone()),
 			);
 
-			Ok((timestamp, slot))
-		},
-		spawner: &task_manager.spawn_essential_handle(),
-		can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
-		registry: config.prometheus_registry(),
-		check_for_equivocation: Default::default(),
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-	})?;
+			let mut io = jsonrpc_core::IoHandler::default();
+			io.extend_with(SystemApi::to_delegate(FullSystem::new(
+				client.clone(),
+				pool,
+				deny_unsafe,
+			)));
+			io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client)));
+			io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new(
+				shared_authority_set.clone(),
+				shared_voter_state,
+				justification_stream.clone(),
+				subscription_executor,
+				finality_proof_provider,
+			)));
+
+			Ok(io)
+		}
+	};
 
 	Ok(sc_service::PartialComponents {
 		client,
 		backend,
 		task_manager,
-		import_queue,
 		keystore_container,
 		select_chain,
+		import_queue,
 		transaction_pool,
-		other: (grandpa_block_import, grandpa_link, telemetry),
+		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, telemetry),
 	})
 }
 
-fn remote_keystore(_url: &str) -> Result<Arc<LocalKeystore>, &'static str> {
-	// FIXME: here would the concrete keystore be built,
-	//        must return a concrete type (NOT `LocalKeystore`) that
-	//        implements `CryptoStore` and `SyncCryptoStore`
-	Err("Remote Keystore not supported.")
+pub struct NewFull<C> {
+	pub task_manager: TaskManager,
+	pub client: C,
+	pub overseer_handle: Option<Handle>,
+	pub network: Arc<sc_network::NetworkService<Block, <Block as BlockT>::Hash>>,
+	pub rpc_handlers: sc_service::RpcHandlers,
+	pub backend: Arc<FullBackend>,
 }
 
-/// Builds a new service for a full client.
-pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
+/// The maximum number of active leaves we forward to the [`Overseer`] on start up.
+const MAX_ACTIVE_LEAVES: usize = 4;
+
+/// Returns the active leaves the overseer should start with.
+async fn active_leaves(
+	select_chain: &sc_consensus::LongestChain<FullBackend, Block>,
+	client: &FullClient,
+) -> Result<Vec<BlockInfo>, Error>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, FullClient> + Send + Sync + 'static,
+	<RuntimeApi as ConstructRuntimeApi<Block, FullClient>>::RuntimeApi:
+		RequiredApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
+	ExecutorDispatch: NativeExecutionDispatch + 'static,
+{
+	let best_block = select_chain.best_chain().await?;
+
+	let mut leaves = select_chain
+		.leaves()
+		.await
+		.unwrap_or_default()
+		.into_iter()
+		.filter_map(|hash| {
+			let number = client.number(hash).ok()??;
+
+			// Only consider leaves that are at most an uncle of the best block.
+			if number < best_block.number().saturating_sub(1) || hash == best_block.hash() {
+				return None
+			}
+
+			let parent_hash = client.header(&BlockId::Hash(hash)).ok()??.parent_hash;
+
+			Some(BlockInfo { hash, parent_hash, number })
+		})
+		.collect::<Vec<_>>();
+
+	// Sort by block number and get the maximum number of leaves
+	leaves.sort_by_key(|b| b.number);
+
+	leaves.push(BlockInfo {
+		hash: best_block.hash(),
+		parent_hash: *best_block.parent_hash(),
+		number: *best_block.number(),
+	});
+
+	Ok(leaves.into_iter().rev().take(MAX_ACTIVE_LEAVES).collect())
+}
+
+/// Create a new full node.
+pub fn new_full(
+	mut config: Configuration,
+	program_path: Option<std::path::PathBuf>,
+	overseer_gen: impl OverseerGen,
+) -> Result<NewFull<Arc<FullClient>>, Error>
+where
+	RuntimeApi: ConstructRuntimeApi<Block, FullClient> + Send + Sync + 'static,
+	<RuntimeApi as ConstructRuntimeApi<Block, FullClient>>::RuntimeApi:
+		RequiredApiCollection<StateBackend = sc_client_api::StateBackendFor<FullBackend, Block>>,
+	ExecutorDispatch: NativeExecutionDispatch + 'static,
+{
+	let is_collator = false;
+
+	let role = config.role.clone();
+	let force_authoring = config.force_authoring;
+	let backoff_authoring_blocks =
+		Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default());
+
+	let disable_grandpa = config.disable_grandpa;
+	let name = config.network.node_name.clone();
+
 	let sc_service::PartialComponents {
 		client,
 		backend,
 		mut task_manager,
-		import_queue,
-		mut keystore_container,
+		keystore_container,
 		select_chain,
+		import_queue,
 		transaction_pool,
-		other: (block_import, grandpa_link, mut telemetry),
-	} = new_partial(&config)?;
-
-	if let Some(url) = &config.keystore_remote {
-		match remote_keystore(url) {
-			Ok(k) => keystore_container.set_remote_keystore(k),
-			Err(e) => {
-				return Err(ServiceError::Other(format!(
-					"Error hooking up remote keystore for {}: {}",
-					url, e
-				)))
-			}
-		};
-	}
+		other: (rpc_extensions_builder, import_setup, rpc_setup, slot_duration, mut telemetry),
+	} = new_partial(&mut config)?;
 
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
+	let prometheus_registry = config.prometheus_registry().cloned();
 
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: None,
-		block_announce_validator_builder: None,
-	})?;
+	let overseer_connector = OverseerConnector::default();
 
-	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
+	let shared_voter_state = rpc_setup;
+	let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht;
+
+	// Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change
+	// anything in terms of behaviour, but makes the logs more consistent with the other
+	// Substrate nodes.
+	config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config());
+
+	{
+		use polkadot_network_bridge::{peer_sets_info, IsAuthority};
+		let is_authority = if role.is_authority() { IsAuthority::Yes } else { IsAuthority::No };
+		config.network.extra_sets.extend(peer_sets_info(is_authority));
 	}
 
-	let role = config.role.clone();
-	let force_authoring = config.force_authoring;
-	let backoff_authoring_blocks: Option<()> = None;
-	let name = config.network.node_name.clone();
-	let enable_grandpa = !config.disable_grandpa;
-	let prometheus_registry = config.prometheus_registry().cloned();
+	let (pov_req_receiver, cfg) = IncomingRequest::get_config_receiver();
+	config.network.request_response_protocols.push(cfg);
+	let (chunk_req_receiver, cfg) = IncomingRequest::get_config_receiver();
+	config.network.request_response_protocols.push(cfg);
+	let (collation_req_receiver, cfg) = IncomingRequest::get_config_receiver();
+	config.network.request_response_protocols.push(cfg);
+	let (available_data_req_receiver, cfg) = IncomingRequest::get_config_receiver();
+	config.network.request_response_protocols.push(cfg);
+	let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver();
+	config.network.request_response_protocols.push(cfg);
+	let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver();
+	config.network.request_response_protocols.push(cfg);
+
+	let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
+		backend.clone(),
+		import_setup.1.shared_authority_set().clone(),
+		vec![],
+	));
 
-	let shared_voter_state = sc_finality_grandpa::SharedVoterState::empty();
+	let (network, system_rpc_tx, network_starter) =
+		sc_service::build_network(sc_service::BuildNetworkParams {
+			config: &config,
+			client: client.clone(),
+			transaction_pool: transaction_pool.clone(),
+			spawn_handle: task_manager.spawn_handle(),
+			import_queue,
+			on_demand: None,
+			block_announce_validator_builder: None,
+			warp_sync: Some(warp_sync),
+		})?;
 
-	let rpc_extensions_builder = {
-		use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider;
+	if config.offchain_worker.enabled {
+		let _ = sc_service::build_offchain_workers(
+			&config,
+			task_manager.spawn_handle(),
+			client.clone(),
+			network.clone(),
+		);
+	}
 
-		use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi};
-		use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler};
-		use sc_rpc::DenyUnsafe;
-		use substrate_frame_rpc_system::{FullSystem, SystemApi};
+	let parachains_db = crate::parachains_db::open_creating(
+		config.database.path().ok_or(Error::DatabasePathRequired)?.into(),
+		crate::parachains_db::CacheSizes::default(),
+	)?;
 
-		let backend = backend.clone();
-		let client = client.clone();
-		let pool = transaction_pool.clone();
+	let availability_config = AvailabilityConfig {
+		col_data: crate::parachains_db::REAL_COLUMNS.col_availability_data,
+		col_meta: crate::parachains_db::REAL_COLUMNS.col_availability_meta,
+	};
 
-		let justification_stream = grandpa_link.justification_stream();
-		let shared_authority_set = grandpa_link.shared_authority_set().clone();
-		let shared_voter_state = shared_voter_state.clone();
+	let approval_voting_config = ApprovalVotingConfig {
+		col_data: crate::parachains_db::REAL_COLUMNS.col_approval_data,
+		slot_duration_millis: slot_duration.as_millis() as u64,
+	};
 
-		let finality_proof_provider =
-			GrandpaFinalityProofProvider::new_for_service(backend, Some(shared_authority_set.clone()));
+	let candidate_validation_config = CandidateValidationConfig {
+		artifacts_cache_path: config
+			.database
+			.path()
+			.ok_or(Error::DatabasePathRequired)?
+			.join("pvf-artifacts"),
+		program_path: match program_path {
+			None => std::env::current_exe()?,
+			Some(p) => p,
+		},
+	};
 
-		Box::new(move |_, subscription_executor| {
-			let mut io = jsonrpc_core::IoHandler::default();
-			io.extend_with(SystemApi::to_delegate(FullSystem::new(
-				client.clone(),
-				pool.clone(),
-				DenyUnsafe::No,
-			)));
-			io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(
-				client.clone(),
-			)));
-			io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new(
-				shared_authority_set.clone(),
-				shared_voter_state.clone(),
-				justification_stream.clone(),
-				subscription_executor,
-				finality_proof_provider.clone(),
-			)));
+	let chain_selection_config = ChainSelectionConfig {
+		col_data: crate::parachains_db::REAL_COLUMNS.col_chain_selection_data,
+		stagnant_check_interval: polkadot_node_core_chain_selection::StagnantCheckInterval::never(),
+	};
 
-			io
-		})
+	let dispute_coordinator_config = DisputeCoordinatorConfig {
+		col_data: crate::parachains_db::REAL_COLUMNS.col_dispute_coordinator_data,
 	};
 
-	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-		network: network.clone(),
+	let rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+		config,
+		backend: backend.clone(),
 		client: client.clone(),
 		keystore: keystore_container.sync_keystore(),
-		task_manager: &mut task_manager,
+		network: network.clone(),
+		rpc_extensions_builder: Box::new(rpc_extensions_builder),
 		transaction_pool: transaction_pool.clone(),
-		rpc_extensions_builder,
+		task_manager: &mut task_manager,
 		on_demand: None,
 		remote_blockchain: None,
-		backend,
 		system_rpc_tx,
-		config,
 		telemetry: telemetry.as_mut(),
 	})?;
 
+	let (block_import, link_half, babe_link) = import_setup;
+
+	let overseer_client = client.clone();
+	let spawner = task_manager.spawn_handle();
+	let active_leaves = futures::executor::block_on(active_leaves(&select_chain, &*client))?;
+
+	let authority_discovery_service = if role.is_authority() || is_collator {
+		use futures::StreamExt;
+		use sc_network::Event;
+
+		let authority_discovery_role = if role.is_authority() {
+			sc_authority_discovery::Role::PublishAndDiscover(keystore_container.keystore())
+		} else {
+			// don't publish our addresses when we're only a collator
+			sc_authority_discovery::Role::Discover
+		};
+		let dht_event_stream =
+			network.event_stream("authority-discovery").filter_map(|e| async move {
+				match e {
+					Event::Dht(e) => Some(e),
+					_ => None,
+				}
+			});
+		let (worker, service) = sc_authority_discovery::new_worker_and_service_with_config(
+			sc_authority_discovery::WorkerConfig {
+				publish_non_global_ips: auth_disc_publish_non_global_ips,
+				..Default::default()
+			},
+			client.clone(),
+			network.clone(),
+			Box::pin(dht_event_stream),
+			authority_discovery_role,
+			prometheus_registry.clone(),
+		);
+
+		task_manager.spawn_handle().spawn("authority-discovery-worker", worker.run());
+		Some(service)
+	} else {
+		None
+	};
+
+	// We'd like to write `let overseer_handle =
+	// authority_discovery_service.map(|authority_discovery_service| ...)`, but in that case we
+	// couldn't use `?` to propagate errors.
+	let local_keystore = keystore_container.local_keystore();
+	let maybe_params =
+		local_keystore.and_then(move |k| authority_discovery_service.map(|a| (a, k)));
+
+	let overseer_handle = if let Some((authority_discovery_service, keystore)) = maybe_params {
+		let (overseer, overseer_handle) = overseer_gen
+			.generate::<sc_service::SpawnTaskHandle, FullClient>(
+				overseer_connector,
+				OverseerGenArgs {
+					leaves: active_leaves,
+					keystore,
+					runtime_client: overseer_client.clone(),
+					parachains_db,
+					availability_config,
+					approval_voting_config,
+					network_service: network.clone(),
+					authority_discovery_service,
+					registry: prometheus_registry.as_ref(),
+					spawner,
+					candidate_validation_config,
+					available_data_req_receiver,
+					chain_selection_config,
+					chunk_req_receiver,
+					collation_req_receiver,
+					dispute_coordinator_config,
+					dispute_req_receiver,
+					pov_req_receiver,
+					statement_req_receiver,
+				},
+			)?;
+		let handle = Handle::new(overseer_handle);
+
+		{
+			let handle = handle.clone();
+			task_manager.spawn_essential_handle().spawn_blocking(
+				"overseer",
+				Box::pin(async move {
+					use futures::{pin_mut, select, FutureExt};
+
+					let forward = polkadot_overseer::forward_events(overseer_client, handle);
+
+					let forward = forward.fuse();
+					let overseer_fut = overseer.run().fuse();
+
+					pin_mut!(overseer_fut);
+					pin_mut!(forward);
+
+					select! {
+						_ = forward => (),
+						_ = overseer_fut => (),
+						complete => (),
+					}
+				}),
+			);
+		}
+
+		Some(handle)
+	} else {
+		None
+	};
+
 	if role.is_authority() {
-		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
+		let can_author_with =
+			sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
+
+		let proposer = sc_basic_authorship::ProposerFactory::new(
 			task_manager.spawn_handle(),
 			client.clone(),
 			transaction_pool,
@@ -282,208 +656,113 @@ pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError>
 			telemetry.as_ref().map(|x| x.handle()),
 		);
 
-		let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone());
-
-		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
-		let raw_slot_duration = slot_duration.slot_duration();
-
-		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _, _>(StartAuraParams {
-			slot_duration,
-			client,
+		let client_clone = client.clone();
+		let overseer_handle =
+			overseer_handle.as_ref().ok_or(Error::AuthoritiesRequireRealOverseer)?.clone();
+		let slot_duration = babe_link.config().slot_duration();
+		let babe_config = sc_consensus_babe::BabeParams {
+			keystore: keystore_container.sync_keystore(),
+			client: client.clone(),
 			select_chain,
 			block_import,
-			proposer_factory,
-			create_inherent_data_providers: move |_, ()| async move {
-				let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-				let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-					*timestamp,
-					raw_slot_duration,
-				);
-
-				Ok((timestamp, slot))
+			env: proposer,
+			sync_oracle: network.clone(),
+			justification_sync_link: network.clone(),
+			create_inherent_data_providers: move |parent, ()| {
+				let client_clone = client_clone.clone();
+				let overseer_handle = overseer_handle.clone();
+				async move {
+					let parachain = polkadot_node_core_parachains_inherent::ParachainsInherentDataProvider::create(
+						&*client_clone,
+						overseer_handle,
+						parent,
+					)
+					.await
+					.map_err(Box::new)?;
+
+					let uncles = sc_consensus_uncles::create_uncles_inherent_data_provider(
+						&*client_clone,
+						parent,
+					)?;
+
+					let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+					let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_duration(
+						*timestamp,
+						slot_duration,
+					);
+
+					Ok((timestamp, slot, uncles, parachain))
+				}
 			},
 			force_authoring,
 			backoff_authoring_blocks,
-			keystore: keystore_container.sync_keystore(),
+			babe_link,
 			can_author_with,
-			sync_oracle: network.clone(),
-			justification_sync_link: network.clone(),
-			block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32),
+			block_proposal_slot_portion: sc_consensus_babe::SlotProportion::new(2f32 / 3f32),
 			max_block_proposal_slot_portion: None,
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		})?;
+		};
 
-		// the AURA authoring task is considered essential, i.e. if it
-		// fails we take down the service with it.
-		task_manager.spawn_essential_handle().spawn_blocking("aura", aura);
+		let babe = sc_consensus_babe::start_babe(babe_config)?;
+		task_manager.spawn_essential_handle().spawn_blocking("babe", babe);
 	}
 
 	// if the node isn't actively participating in consensus then it doesn't
 	// need a keystore, regardless of which protocol we use below.
-	let keystore = if role.is_authority() {
-		Some(keystore_container.sync_keystore())
-	} else {
-		None
-	};
+	let keystore_opt =
+		if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None };
 
-	let grandpa_config = sc_finality_grandpa::Config {
-		// FIXME #1578 make this available through chainspec
-		gossip_duration: Duration::from_millis(333),
+	let config = sc_finality_grandpa::Config {
+		// FIXME substrate#1578 make this available through chainspec
+		gossip_duration: Duration::from_millis(1000),
 		justification_period: 512,
 		name: Some(name),
 		observer_enabled: false,
-		keystore,
+		keystore: keystore_opt,
 		local_role: role,
 		telemetry: telemetry.as_ref().map(|x| x.handle()),
 	};
 
+	let enable_grandpa = !disable_grandpa;
 	if enable_grandpa {
 		// start the full GRANDPA voter
-		// NOTE: non-authorities could run the GRANDPA observer protocol, but at
-		// this point the full voter should provide better guarantees of block
-		// and vote data availability than the observer. The observer has not
-		// been tested extensively yet and having most nodes in a network run it
-		// could lead to finality stalls.
+		// NOTE: unlike in substrate we are currently running the full
+		// GRANDPA voter protocol for all full nodes (regardless of whether
+		// they're validators or not). at this point the full voter should
+		// provide better guarantees of block and vote data availability than
+		// the observer.
+
+		// add a custom voting rule to temporarily stop voting for new blocks
+		// after the given pause block is finalized and restarting after the
+		// given delay.
+		let builder = sc_finality_grandpa::VotingRulesBuilder::default();
+
+		let voting_rule = builder.build();
 		let grandpa_config = sc_finality_grandpa::GrandpaParams {
-			config: grandpa_config,
-			link: grandpa_link,
-			network,
-			voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(),
+			config,
+			link: link_half,
+			network: network.clone(),
+			voting_rule,
 			prometheus_registry,
 			shared_voter_state,
 			telemetry: telemetry.as_ref().map(|x| x.handle()),
 		};
 
-		// the GRANDPA voter task is considered infallible, i.e.
-		// if it fails we take down the service with it.
-		task_manager
-			.spawn_essential_handle()
-			.spawn_blocking("grandpa-voter", sc_finality_grandpa::run_grandpa_voter(grandpa_config)?);
+		task_manager.spawn_essential_handle().spawn_blocking(
+			"grandpa-voter",
+			sc_finality_grandpa::run_grandpa_voter(grandpa_config)?,
+		);
 	}
 
 	network_starter.start_network();
-	Ok(task_manager)
-}
 
-/// Builds a new service for a light client.
-pub fn new_light(mut config: Configuration) -> Result<TaskManager, ServiceError> {
-	let telemetry = config
-		.telemetry_endpoints
-		.clone()
-		.filter(|x| !x.is_empty())
-		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
-			let worker = TelemetryWorker::new(16)?;
-			let telemetry = worker.handle().new_telemetry(endpoints);
-			Ok((worker, telemetry))
-		})
-		.transpose()?;
-
-	let (client, backend, keystore_container, mut task_manager, on_demand) =
-		sc_service::new_light_parts::<Block, RuntimeApi, Executor>(
-			&config,
-			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
-		)?;
-
-	let mut telemetry = telemetry.map(|(worker, telemetry)| {
-		task_manager.spawn_handle().spawn("telemetry", worker.run());
-		telemetry
-	});
-
-	config
-		.network
-		.extra_sets
-		.push(sc_finality_grandpa::grandpa_peers_set_config());
-
-	let select_chain = sc_consensus::LongestChain::new(backend.clone());
-
-	let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light(
-		config.transaction_pool.clone(),
-		config.prometheus_registry(),
-		task_manager.spawn_essential_handle(),
-		client.clone(),
-		on_demand.clone(),
-	));
-
-	let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import(
-		client.clone(),
-		&(client.clone() as Arc<_>),
-		select_chain,
-		telemetry.as_ref().map(|x| x.handle()),
-	)?;
-
-	let slot_duration = sc_consensus_aura::slot_duration(&*client)?.slot_duration();
-
-	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _, _>(ImportQueueParams {
-		block_import: grandpa_block_import.clone(),
-		justification_import: Some(Box::new(grandpa_block_import)),
-		client: client.clone(),
-		create_inherent_data_providers: move |_, ()| async move {
-			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
-			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
-				*timestamp,
-				slot_duration,
-			);
-
-			Ok((timestamp, slot))
-		},
-		spawner: &task_manager.spawn_essential_handle(),
-		can_author_with: sp_consensus::NeverCanAuthor,
-		registry: config.prometheus_registry(),
-		check_for_equivocation: Default::default(),
-		telemetry: telemetry.as_ref().map(|x| x.handle()),
-	})?;
-
-	let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams {
-		config: &config,
-		client: client.clone(),
-		transaction_pool: transaction_pool.clone(),
-		spawn_handle: task_manager.spawn_handle(),
-		import_queue,
-		on_demand: Some(on_demand.clone()),
-		block_announce_validator_builder: None,
-	})?;
-
-	if config.offchain_worker.enabled {
-		sc_service::build_offchain_workers(&config, task_manager.spawn_handle(), client.clone(), network.clone());
-	}
-
-	let enable_grandpa = !config.disable_grandpa;
-	if enable_grandpa {
-		let name = config.network.node_name.clone();
-
-		let config = sc_finality_grandpa::Config {
-			gossip_duration: std::time::Duration::from_millis(333),
-			justification_period: 512,
-			name: Some(name),
-			observer_enabled: false,
-			keystore: None,
-			local_role: config.role.clone(),
-			telemetry: telemetry.as_ref().map(|x| x.handle()),
-		};
-
-		task_manager.spawn_handle().spawn_blocking(
-			"grandpa-observer",
-			sc_finality_grandpa::run_grandpa_observer(config, grandpa_link, network.clone())?,
-		);
-	}
-
-	sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-		remote_blockchain: Some(backend.remote_blockchain()),
-		transaction_pool,
-		task_manager: &mut task_manager,
-		on_demand: Some(on_demand),
-		rpc_extensions_builder: Box::new(|_, _| ()),
-		config,
-		client,
-		keystore: keystore_container.sync_keystore(),
-		backend,
-		network,
-		system_rpc_tx,
-		telemetry: telemetry.as_mut(),
-	})?;
+	Ok(NewFull { task_manager, client, overseer_handle, network, rpc_handlers, backend })
+}
 
-	network_starter.start_network();
-	Ok(task_manager)
+pub fn build_full(
+	config: Configuration,
+	overseer_gen: impl OverseerGen,
+) -> Result<NewFull<Arc<FullClient>>, Error> {
+	new_full(config, None, overseer_gen)
 }
diff --git a/polkadot/bridges/bin/rialto/runtime/Cargo.toml b/polkadot/bridges/bin/rialto/runtime/Cargo.toml
index 4902c0c06a3df4bd5da28f5fcff1055793a23ec5..ff205da15585d25fcd9007c58b062380ed1b5bbe 100644
--- a/polkadot/bridges/bin/rialto/runtime/Cargo.toml
+++ b/polkadot/bridges/bin/rialto/runtime/Cargo.toml
@@ -8,11 +8,12 @@ repository = "https://github.com/paritytech/parity-bridges-common/"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive"] }
 hex-literal = "0.3"
-libsecp256k1 = { version = "0.3.4", optional = true, default-features = false, features = ["hmac"] }
+libsecp256k1 = { version = "0.7", optional = true, default-features = false, features = ["hmac"] }
 log = { version = "0.4.14", default-features = false }
-serde = { version = "1.0.124", optional = true, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0", optional = true, features = ["derive"] }
 
 # Bridge dependencies
 
@@ -34,38 +35,44 @@ pallet-shift-session-manager = { path = "../../../modules/shift-session-manager"
 
 # Substrate Dependencies
 
-frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
-frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
+# Polkadot (parachain) Dependencies
+
+polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
+polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false }
 
 [dev-dependencies]
-libsecp256k1 = { version = "0.3.4", features = ["hmac"] }
+libsecp256k1 = { version = "0.7", features = ["hmac"] }
 
 [build-dependencies]
 substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -89,7 +96,8 @@ std = [
 	"frame-system-rpc-runtime-api/std",
 	"frame-system/std",
 	"log/std",
-	"pallet-aura/std",
+	"pallet-authority-discovery/std",
+	"pallet-babe/std",
 	"pallet-balances/std",
 	"pallet-bridge-currency-exchange/std",
 	"pallet-bridge-dispatch/std",
@@ -97,16 +105,20 @@ std = [
 	"pallet-bridge-grandpa/std",
 	"pallet-bridge-messages/std",
 	"pallet-grandpa/std",
-	"pallet-randomness-collective-flip/std",
 	"pallet-shift-session-manager/std",
 	"pallet-sudo/std",
 	"pallet-timestamp/std",
 	"pallet-transaction-payment-rpc-runtime-api/std",
 	"pallet-transaction-payment/std",
+	"polkadot-primitives/std",
+	"polkadot-runtime-common/std",
+	"polkadot-runtime-parachains/std",
+	"scale-info/std",
 	"serde",
 	"sp-api/std",
+	"sp-authority-discovery/std",
 	"sp-block-builder/std",
-	"sp-consensus-aura/std",
+	"sp-consensus-babe/std",
 	"sp-core/std",
 	"sp-finality-grandpa/std",
 	"sp-inherents/std",
diff --git a/polkadot/bridges/bin/rialto/runtime/src/benches.rs b/polkadot/bridges/bin/rialto/runtime/src/benches.rs
index 86d6b8361c635da70ea231cdc51f00c7969a2879..ce3f84069795a4e240a27157949ec836ed2c330d 100644
--- a/polkadot/bridges/bin/rialto/runtime/src/benches.rs
+++ b/polkadot/bridges/bin/rialto/runtime/src/benches.rs
@@ -22,7 +22,8 @@ use pallet_bridge_eth_poa::{ValidatorsConfiguration, ValidatorsSource};
 use sp_std::vec;
 
 pub use crate::kovan::{
-	genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval, PruningStrategy,
+	genesis_header, genesis_validators, BridgeAuraConfiguration, FinalityVotesCachingInterval,
+	PruningStrategy,
 };
 
 frame_support::parameter_types! {
diff --git a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs b/polkadot/bridges/bin/rialto/runtime/src/exchange.rs
index 3b9c88112e4b4c90efbf801c7ac7f64030447440..403a6e3faff99e3dbf41a654b6373429c9d42b60 100644
--- a/polkadot/bridges/bin/rialto/runtime/src/exchange.rs
+++ b/polkadot/bridges/bin/rialto/runtime/src/exchange.rs
@@ -28,19 +28,21 @@
 //! 5) receive tokens by providing proof-of-inclusion of PoA transaction.
 
 use bp_currency_exchange::{
-	Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction, Result as ExchangeResult,
+	Error as ExchangeError, LockFundsTransaction, MaybeLockFundsTransaction,
+	Result as ExchangeResult,
 };
 use bp_eth_poa::{transaction_decode_rlp, RawTransaction, RawTransactionReceipt};
 use codec::{Decode, Encode};
 use frame_support::RuntimeDebug;
 use hex_literal::hex;
+use scale_info::TypeInfo;
 use sp_std::vec::Vec;
 
 /// Ethereum address where locked PoA funds must be sent to.
 pub const LOCK_FUNDS_ADDRESS: [u8; 20] = hex!("DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF");
 
 /// Ethereum transaction inclusion proof.
-#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)]
+#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct EthereumTransactionInclusionProof {
 	/// Hash of the block with transaction.
 	pub block: sp_core::H256,
@@ -55,9 +57,9 @@ pub struct EthereumTransactionInclusionProof {
 ///
 /// The assumption is that this pair will never appear more than once in
 /// transactions included into finalized blocks. This is obviously true
-/// for any existing eth-like chain (that keep current transaction format),
-/// because otherwise transaction can be replayed over and over.
-#[derive(Encode, Decode, PartialEq, RuntimeDebug)]
+/// for any existing eth-like chain (that keep current TX format), because
+/// otherwise transaction can be replayed over and over.
+#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct EthereumTransactionTag {
 	/// Account that has locked funds.
 	pub account: [u8; 20],
@@ -65,7 +67,7 @@ pub struct EthereumTransactionTag {
 	pub nonce: sp_core::U256,
 }
 
-/// Eth transaction from runtime perspective.
+/// Ethereum transaction from runtime perspective.
 pub struct EthTransaction;
 
 impl MaybeLockFundsTransaction for EthTransaction {
@@ -87,7 +89,7 @@ impl MaybeLockFundsTransaction for EthTransaction {
 				tx.unsigned.to,
 			);
 
-			return Err(ExchangeError::InvalidTransaction);
+			return Err(ExchangeError::InvalidTransaction)
 		}
 
 		let mut recipient_raw = sp_core::H256::default();
@@ -100,8 +102,8 @@ impl MaybeLockFundsTransaction for EthTransaction {
 					len,
 				);
 
-				return Err(ExchangeError::InvalidRecipient);
-			}
+				return Err(ExchangeError::InvalidRecipient)
+			},
 		}
 		let amount = tx.unsigned.value.low_u128();
 
@@ -112,7 +114,7 @@ impl MaybeLockFundsTransaction for EthTransaction {
 				tx.unsigned.value,
 			);
 
-			return Err(ExchangeError::InvalidAmount);
+			return Err(ExchangeError::InvalidAmount)
 		}
 
 		Ok(LockFundsTransaction {
@@ -128,7 +130,7 @@ impl MaybeLockFundsTransaction for EthTransaction {
 
 /// Prepares everything required to bench claim of funds locked by given transaction.
 #[cfg(feature = "runtime-benchmarks")]
-pub(crate) fn prepare_environment_for_claim<T: pallet_bridge_eth_poa::Config<I>, I: frame_support::traits::Instance>(
+pub(crate) fn prepare_environment_for_claim<T: pallet_bridge_eth_poa::Config<I>, I: 'static>(
 	transactions: &[(RawTransaction, RawTransactionReceipt)],
 ) -> bp_eth_poa::H256 {
 	use bp_eth_poa::compute_merkle_root;
@@ -161,7 +163,7 @@ pub(crate) fn prepare_ethereum_transaction(
 	// chain id is 0x11
 	// sender secret is 0x4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7
 	let chain_id = 0x11;
-	let signer = secp256k1::SecretKey::parse(&hex!(
+	let signer = libsecp256k1::SecretKey::parse(&hex!(
 		"4d5db4107d237df6a3d58ee5f70ae63d73d7658d4026f2eefd2f204c81682cb7"
 	))
 	.unwrap();
@@ -213,10 +215,7 @@ mod tests {
 
 	#[test]
 	fn invalid_transaction_rejected() {
-		assert_eq!(
-			EthTransaction::parse(&Vec::new()),
-			Err(ExchangeError::InvalidTransaction),
-		);
+		assert_eq!(EthTransaction::parse(&Vec::new()), Err(ExchangeError::InvalidTransaction),);
 	}
 
 	#[test]
diff --git a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs b/polkadot/bridges/bin/rialto/runtime/src/kovan.rs
index 528c6205846f4c89cf6e3f1fc6836ba1d8b6d825..64adec8a9a8f389ada6b58a5d59863eb0440bd17 100644
--- a/polkadot/bridges/bin/rialto/runtime/src/kovan.rs
+++ b/polkadot/bridges/bin/rialto/runtime/src/kovan.rs
@@ -21,8 +21,8 @@ use bp_header_chain::InclusionProofVerifier;
 use frame_support::RuntimeDebug;
 use hex_literal::hex;
 use pallet_bridge_eth_poa::{
-	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy, ValidatorsConfiguration,
-	ValidatorsSource,
+	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as BridgePruningStrategy,
+	ValidatorsConfiguration, ValidatorsSource,
 };
 use sp_std::prelude::*;
 
@@ -34,8 +34,8 @@ frame_support::parameter_types! {
 		kovan_validators_configuration();
 }
 
-/// Max number of finalized headers to keep. It is equivalent of approximately
-/// 24 hours of finalized blocks on current Kovan chain.
+/// Max number of finalized headers to keep. It is equivalent to around 24 hours of
+/// finalized blocks on the current Kovan chain.
 const FINALIZED_HEADERS_TO_KEEP: u64 = 20_000;
 
 /// Aura engine configuration for Kovan chain.
@@ -102,11 +102,14 @@ pub fn genesis_header() -> AuraHeader {
 		timestamp: 0,
 		number: 0,
 		author: Default::default(),
-		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
-		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(),
+		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
+		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
+			.into(),
 		extra_data: vec![],
 		state_root: hex!("2480155b48a1cea17d67dbfdfaafe821c1d19cdd478c5358e8ec56dec24502b2").into(),
-		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
+		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
 		log_bloom: Default::default(),
 		gas_used: Default::default(),
 		gas_limit: 6000000.into(),
@@ -114,8 +117,9 @@ pub fn genesis_header() -> AuraHeader {
 		seal: vec![
 			vec![128],
 			vec![
-				184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				184, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 			],
 		],
 	}
@@ -153,12 +157,17 @@ impl InclusionProofVerifier for KovanBlockchain {
 	type Transaction = RawTransaction;
 	type TransactionInclusionProof = EthereumTransactionInclusionProof;
 
-	fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<Self::Transaction> {
-		let is_transaction_finalized =
-			crate::BridgeKovan::verify_transaction_finalized(proof.block, proof.index, &proof.proof);
+	fn verify_transaction_inclusion_proof(
+		proof: &Self::TransactionInclusionProof,
+	) -> Option<Self::Transaction> {
+		let is_transaction_finalized = crate::BridgeKovan::verify_transaction_finalized(
+			proof.block,
+			proof.index,
+			&proof.proof,
+		);
 
 		if !is_transaction_finalized {
-			return None;
+			return None
 		}
 
 		proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone())
diff --git a/polkadot/bridges/bin/rialto/runtime/src/lib.rs b/polkadot/bridges/bin/rialto/runtime/src/lib.rs
index 3285a90e1e13a5f9d364968fd6ff3b94a9bcad36..d811e50611665fc704299bdfcef6d1546a25f303 100644
--- a/polkadot/bridges/bin/rialto/runtime/src/lib.rs
+++ b/polkadot/bridges/bin/rialto/runtime/src/lib.rs
@@ -36,6 +36,7 @@ pub mod exchange;
 pub mod benches;
 pub mod kovan;
 pub mod millau_messages;
+pub mod parachains;
 pub mod rialto_poa;
 
 use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge};
@@ -43,21 +44,20 @@ use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge};
 use bridge_runtime_common::messages::{
 	source::estimate_message_dispatch_and_delivery_fee, MessageBridge,
 };
-use codec::Decode;
 use pallet_grandpa::{
 	fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList,
 };
-use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo};
+use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo};
 use sp_api::impl_runtime_apis;
-use sp_consensus_aura::sr25519::AuthorityId as AuraId;
+use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
 use sp_core::{crypto::KeyTypeId, OpaqueMetadata};
 use sp_runtime::{
 	create_runtime_str, generic, impl_opaque_keys,
-	traits::{Block as BlockT, IdentityLookup, NumberFor, OpaqueKeys},
+	traits::{AccountIdLookup, Block as BlockT, NumberFor, OpaqueKeys},
 	transaction_validity::{TransactionSource, TransactionValidity},
-	ApplyExtrinsicResult, MultiSignature, MultiSigner,
+	ApplyExtrinsicResult, FixedPointNumber, MultiSignature, MultiSigner, Perquintill,
 };
-use sp_std::prelude::*;
+use sp_std::{collections::btree_map::BTreeMap, prelude::*};
 #[cfg(feature = "std")]
 use sp_version::NativeVersion;
 use sp_version::RuntimeVersion;
@@ -101,7 +101,7 @@ pub type AccountIndex = u32;
 pub type Balance = bp_rialto::Balance;
 
 /// Index of a transaction in the chain.
-pub type Index = u32;
+pub type Index = bp_rialto::Index;
 
 /// A hash of some data used by the chain.
 pub type Hash = bp_rialto::Hash;
@@ -131,8 +131,11 @@ pub mod opaque {
 
 impl_opaque_keys! {
 	pub struct SessionKeys {
-		pub aura: Aura,
+		pub babe: Babe,
 		pub grandpa: Grandpa,
+		pub para_validator: Initializer,
+		pub para_assignment: SessionInfo,
+		pub authority_discovery: AuthorityDiscovery,
 	}
 }
 
@@ -171,7 +174,7 @@ impl frame_system::Config for Runtime {
 	/// The aggregated dispatch type that is available for extrinsics.
 	type Call = Call;
 	/// The lookup mechanism to get account ID from whatever is passed in dispatchers.
-	type Lookup = IdentityLookup<AccountId>;
+	type Lookup = AccountIdLookup<AccountId, ()>;
 	/// The index type for storing how many extrinsics an account has signed.
 	type Index = Index;
 	/// The index type for blocks.
@@ -213,15 +216,41 @@ impl frame_system::Config for Runtime {
 	type OnSetCode = ();
 }
 
-impl pallet_randomness_collective_flip::Config for Runtime {}
+/// The BABE epoch configuration at genesis.
+pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration =
+	sp_consensus_babe::BabeEpochConfiguration {
+		c: bp_rialto::time_units::PRIMARY_PROBABILITY,
+		allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots,
+	};
 
 parameter_types! {
+	pub const EpochDuration: u64 = bp_rialto::EPOCH_DURATION_IN_SLOTS as u64;
+	pub const ExpectedBlockTime: bp_rialto::Moment = bp_rialto::time_units::MILLISECS_PER_BLOCK;
 	pub const MaxAuthorities: u32 = 10;
 }
 
-impl pallet_aura::Config for Runtime {
-	type AuthorityId = AuraId;
+impl pallet_babe::Config for Runtime {
+	type EpochDuration = EpochDuration;
+	type ExpectedBlockTime = ExpectedBlockTime;
 	type MaxAuthorities = MaxAuthorities;
+
+	// session module is the trigger
+	type EpochChangeTrigger = pallet_babe::ExternalTrigger;
+
+	// equivocation related configuration - we don't expect any equivocations in our testnets
+	type KeyOwnerProofSystem = ();
+	type KeyOwnerProof = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		pallet_babe::AuthorityId,
+	)>>::Proof;
+	type KeyOwnerIdentification = <Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(
+		KeyTypeId,
+		pallet_babe::AuthorityId,
+	)>>::IdentificationTuple;
+	type HandleEquivocation = ();
+
+	type DisabledValidators = ();
+	type WeightInfo = ();
 }
 
 type RialtoPoA = pallet_bridge_eth_poa::Instance1;
@@ -268,7 +297,7 @@ impl pallet_bridge_currency_exchange::Config<KovanCurrencyExchange> for Runtime
 
 impl pallet_bridge_dispatch::Config for Runtime {
 	type Event = Event;
-	type MessageId = (bp_messages::LaneId, bp_messages::MessageNonce);
+	type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce);
 	type Call = Call;
 	type CallFilter = frame_support::traits::Everything;
 	type EncodedCall = crate::millau_messages::FromMillauEncodedCall;
@@ -288,14 +317,14 @@ impl bp_currency_exchange::DepositInto for DepositInto {
 		recipient: Self::Recipient,
 		amount: Self::Amount,
 	) -> bp_currency_exchange::Result<()> {
-		// let balances module make all checks for us (it won't allow depositing lower than existential
-		// deposit, balance overflow, ...)
+		// let balances module make all checks for us (it won't allow depositing lower than
+		// existential deposit, balance overflow, ...)
 		let deposited = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
 			&recipient, amount,
 		);
 
-		// I'm dropping deposited here explicitly to illustrate the fact that it'll update `TotalIssuance`
-		// on drop
+		// I'm dropping deposited here explicitly to illustrate the fact that it'll update
+		// `TotalIssuance` on drop
 		let deposited_amount = deposited.peek();
 		drop(deposited);
 
@@ -343,6 +372,7 @@ impl bp_currency_exchange::DepositInto for DepositInto {
 impl pallet_grandpa::Config for Runtime {
 	type Event = Event;
 	type Call = Call;
+	type MaxAuthorities = MaxAuthorities;
 	type KeyOwnerProofSystem = ();
 	type KeyOwnerProof =
 		<Self::KeyOwnerProofSystem as KeyOwnerProofSystem<(KeyTypeId, GrandpaId)>>::Proof;
@@ -361,9 +391,9 @@ parameter_types! {
 }
 
 impl pallet_timestamp::Config for Runtime {
-	/// A timestamp: milliseconds since the Unix epoch.
-	type Moment = u64;
-	type OnTimestampSet = Aura;
+	/// A timestamp: milliseconds since the UNIX epoch.
+	type Moment = bp_rialto::Moment;
+	type OnTimestampSet = Babe;
 	type MinimumPeriod = MinimumPeriod;
 	// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
 	type WeightInfo = ();
@@ -395,13 +425,25 @@ impl pallet_balances::Config for Runtime {
 parameter_types! {
 	pub const TransactionBaseFee: Balance = 0;
 	pub const TransactionByteFee: Balance = 1;
+	pub const OperationalFeeMultiplier: u8 = 5;
+	// values for the following parameters are copied from the polkadot repo, but it is fine
+	// not to sync them - we're not going to make Rialto a full copy of any Polkadot-like chain
+	pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25);
+	pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000);
+	pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128);
 }
 
 impl pallet_transaction_payment::Config for Runtime {
 	type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter<Balances, ()>;
 	type TransactionByteFee = TransactionByteFee;
-	type WeightToFee = IdentityFee<Balance>;
-	type FeeMultiplierUpdate = ();
+	type OperationalFeeMultiplier = OperationalFeeMultiplier;
+	type WeightToFee = bp_rialto::WeightToFee;
+	type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment<
+		Runtime,
+		TargetBlockFullness,
+		AdjustmentVariable,
+		MinimumMultiplier,
+	>;
 }
 
 impl pallet_sudo::Config for Runtime {
@@ -409,17 +451,12 @@ impl pallet_sudo::Config for Runtime {
 	type Call = Call;
 }
 
-parameter_types! {
-	pub const Period: BlockNumber = bp_rialto::SESSION_LENGTH;
-	pub const Offset: BlockNumber = 0;
-}
-
 impl pallet_session::Config for Runtime {
 	type Event = Event;
 	type ValidatorId = <Self as frame_system::Config>::AccountId;
 	type ValidatorIdOf = ();
-	type ShouldEndSession = pallet_session::PeriodicSessions<Period, Offset>;
-	type NextSessionRotation = pallet_session::PeriodicSessions<Period, Offset>;
+	type ShouldEndSession = Babe;
+	type NextSessionRotation = Babe;
 	type SessionManager = pallet_shift_session_manager::Pallet<Runtime>;
 	type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
 	type Keys = SessionKeys;
@@ -427,6 +464,10 @@ impl pallet_session::Config for Runtime {
 	type WeightInfo = ();
 }
 
+impl pallet_authority_discovery::Config for Runtime {
+	type MaxAuthorities = MaxAuthorities;
+}
+
 parameter_types! {
 	/// This is a pretty unscientific cap.
 	///
@@ -475,10 +516,11 @@ parameter_types! {
 	pub const GetDeliveryConfirmationTransactionFee: Balance =
 		bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _;
 	pub const RootAccountForPayments: Option<AccountId> = None;
+	pub const BridgedChainId: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID;
 }
 
 /// Instance of the messages pallet used to relay messages to/from Millau chain.
-pub type WithMillauMessagesInstance = pallet_bridge_messages::DefaultInstance;
+pub type WithMillauMessagesInstance = ();
 
 impl pallet_bridge_messages::Config<WithMillauMessagesInstance> for Runtime {
 	type Event = Event;
@@ -502,14 +544,17 @@ impl pallet_bridge_messages::Config<WithMillauMessagesInstance> for Runtime {
 	type MessageDeliveryAndDispatchPayment =
 		pallet_bridge_messages::instant_payments::InstantCurrencyPayments<
 			Runtime,
+			(),
 			pallet_balances::Pallet<Runtime>,
 			GetDeliveryConfirmationTransactionFee,
 			RootAccountForPayments,
 		>;
+	type OnMessageAccepted = ();
 	type OnDeliveryConfirmed = ();
 
 	type SourceHeaderChain = crate::millau_messages::Millau;
 	type MessageDispatch = crate::millau_messages::FromMillauMessageDispatch;
+	type BridgedChainId = BridgedChainId;
 }
 
 construct_runtime!(
@@ -518,28 +563,56 @@ construct_runtime!(
 		NodeBlock = opaque::Block,
 		UncheckedExtrinsic = UncheckedExtrinsic
 	{
-		BridgeRialtoPoa: pallet_bridge_eth_poa::<Instance1>::{Pallet, Call, Config, Storage, ValidateUnsigned},
-		BridgeKovan: pallet_bridge_eth_poa::<Instance2>::{Pallet, Call, Config, Storage, ValidateUnsigned},
-		BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::<Instance1>::{Pallet, Call},
-		BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::<Instance2>::{Pallet, Call},
-		BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage},
-		BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event<T>},
-		BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event<T>},
 		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
-		RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage},
+		Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
+
+		// Must be before session.
+		Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned},
+
 		Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent},
-		Aura: pallet_aura::{Pallet, Config<T>},
-		Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
 		Balances: pallet_balances::{Pallet, Call, Storage, Config<T>, Event<T>},
 		TransactionPayment: pallet_transaction_payment::{Pallet, Storage},
-		Sudo: pallet_sudo::{Pallet, Call, Config<T>, Storage, Event<T>},
+
+		// Consensus support.
+		AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config},
 		Session: pallet_session::{Pallet, Call, Storage, Event, Config<T>},
+		Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event},
 		ShiftSessionManager: pallet_shift_session_manager::{Pallet},
+
+		// Eth-PoA chains bridge modules.
+		BridgeRialtoPoa: pallet_bridge_eth_poa::<Instance1>::{Pallet, Call, Config, Storage, ValidateUnsigned},
+		BridgeKovan: pallet_bridge_eth_poa::<Instance2>::{Pallet, Call, Config, Storage, ValidateUnsigned},
+		BridgeRialtoCurrencyExchange: pallet_bridge_currency_exchange::<Instance1>::{Pallet, Call},
+		BridgeKovanCurrencyExchange: pallet_bridge_currency_exchange::<Instance2>::{Pallet, Call},
+
+		// Millau bridge modules.
+		BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage},
+		BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event<T>},
+		BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event<T>, Config<T>},
+
+		// Parachain modules.
+		ParachainsOrigin: polkadot_runtime_parachains::origin::{Pallet, Origin},
+		Configuration: polkadot_runtime_parachains::configuration::{Pallet, Call, Storage, Config<T>},
+		Shared: polkadot_runtime_parachains::shared::{Pallet, Call, Storage},
+		Inclusion: polkadot_runtime_parachains::inclusion::{Pallet, Call, Storage, Event<T>},
+		ParasInherent: polkadot_runtime_parachains::paras_inherent::{Pallet, Call, Storage, Inherent},
+		Scheduler: polkadot_runtime_parachains::scheduler::{Pallet, Storage},
+		Paras: polkadot_runtime_parachains::paras::{Pallet, Call, Storage, Event, Config},
+		Initializer: polkadot_runtime_parachains::initializer::{Pallet, Call, Storage},
+		Dmp: polkadot_runtime_parachains::dmp::{Pallet, Call, Storage},
+		Ump: polkadot_runtime_parachains::ump::{Pallet, Call, Storage, Event},
+		Hrmp: polkadot_runtime_parachains::hrmp::{Pallet, Call, Storage, Event<T>, Config},
+		SessionInfo: polkadot_runtime_parachains::session_info::{Pallet, Storage},
+
+		// Parachain Onboarding Pallets
+		Registrar: polkadot_runtime_common::paras_registrar::{Pallet, Call, Storage, Event<T>},
+		Slots: polkadot_runtime_common::slots::{Pallet, Call, Storage, Event<T>},
+		ParasSudoWrapper: polkadot_runtime_common::paras_sudo_wrapper::{Pallet, Call},
 	}
 );
 
 /// The address format for describing accounts.
-pub type Address = AccountId;
+pub type Address = sp_runtime::MultiAddress<AccountId, ()>;
 /// Block header type as expected by this runtime.
 pub type Header = generic::Header<BlockNumber, Hashing>;
 /// Block type as expected by this runtime.
@@ -570,7 +643,7 @@ pub type Executive = frame_executive::Executive<
 	Block,
 	frame_system::ChainContext<Runtime>,
 	Runtime,
-	AllPalletsWithSystem,
+	AllPallets,
 >;
 
 impl_runtime_apis! {
@@ -700,13 +773,145 @@ impl_runtime_apis! {
 		}
 	}
 
-	impl sp_consensus_aura::AuraApi<Block, AuraId> for Runtime {
-		fn slot_duration() -> sp_consensus_aura::SlotDuration {
-			sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration())
+	impl sp_consensus_babe::BabeApi<Block> for Runtime {
+		fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration {
+			// The choice of `c` parameter (where `1 - c` represents the
+			// probability of a slot being empty), is done in accordance to the
+			// slot duration and expected target block time, for safely
+			// resisting network delays of maximum two seconds.
+			// <https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results>
+			sp_consensus_babe::BabeGenesisConfiguration {
+				slot_duration: Babe::slot_duration(),
+				epoch_length: EpochDuration::get(),
+				c: BABE_GENESIS_EPOCH_CONFIG.c,
+				genesis_authorities: Babe::authorities().to_vec(),
+				randomness: Babe::randomness(),
+				allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots,
+			}
+		}
+
+		fn current_epoch_start() -> sp_consensus_babe::Slot {
+			Babe::current_epoch_start()
 		}
 
-		fn authorities() -> Vec<AuraId> {
-			Aura::authorities()
+		fn current_epoch() -> sp_consensus_babe::Epoch {
+			Babe::current_epoch()
+		}
+
+		fn next_epoch() -> sp_consensus_babe::Epoch {
+			Babe::next_epoch()
+		}
+
+		fn generate_key_ownership_proof(
+			_slot: sp_consensus_babe::Slot,
+			_authority_id: sp_consensus_babe::AuthorityId,
+		) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
+			None
+		}
+
+		fn submit_report_equivocation_unsigned_extrinsic(
+			equivocation_proof: sp_consensus_babe::EquivocationProof<<Block as BlockT>::Header>,
+			key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof,
+		) -> Option<()> {
+			let key_owner_proof = key_owner_proof.decode()?;
+
+			Babe::submit_unsigned_equivocation_report(
+				equivocation_proof,
+				key_owner_proof,
+			)
+		}
+	}
+
+	impl polkadot_primitives::v1::ParachainHost<Block, Hash, BlockNumber> for Runtime {
+		fn validators() -> Vec<polkadot_primitives::v1::ValidatorId> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::validators::<Runtime>()
+		}
+
+		fn validator_groups() -> (
+			Vec<Vec<polkadot_primitives::v1::ValidatorIndex>>,
+			polkadot_primitives::v1::GroupRotationInfo<BlockNumber>,
+		) {
+			polkadot_runtime_parachains::runtime_api_impl::v1::validator_groups::<Runtime>()
+		}
+
+		fn availability_cores() -> Vec<polkadot_primitives::v1::CoreState<Hash, BlockNumber>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::availability_cores::<Runtime>()
+		}
+
+		fn persisted_validation_data(
+			para_id: polkadot_primitives::v1::Id,
+			assumption: polkadot_primitives::v1::OccupiedCoreAssumption,
+		)
+			-> Option<polkadot_primitives::v1::PersistedValidationData<Hash, BlockNumber>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::persisted_validation_data::<Runtime>(para_id, assumption)
+		}
+
+		fn check_validation_outputs(
+			para_id: polkadot_primitives::v1::Id,
+			outputs: polkadot_primitives::v1::CandidateCommitments,
+		) -> bool {
+			polkadot_runtime_parachains::runtime_api_impl::v1::check_validation_outputs::<Runtime>(para_id, outputs)
+		}
+
+		fn session_index_for_child() -> polkadot_primitives::v1::SessionIndex {
+			polkadot_runtime_parachains::runtime_api_impl::v1::session_index_for_child::<Runtime>()
+		}
+
+		fn validation_code(
+			para_id: polkadot_primitives::v1::Id,
+			assumption: polkadot_primitives::v1::OccupiedCoreAssumption,
+		)
+			-> Option<polkadot_primitives::v1::ValidationCode> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::validation_code::<Runtime>(para_id, assumption)
+		}
+
+		fn candidate_pending_availability(
+			para_id: polkadot_primitives::v1::Id,
+		) -> Option<polkadot_primitives::v1::CommittedCandidateReceipt<Hash>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::candidate_pending_availability::<Runtime>(para_id)
+		}
+
+		fn candidate_events() -> Vec<polkadot_primitives::v1::CandidateEvent<Hash>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::candidate_events::<Runtime, _>(|ev| {
+				match ev {
+					Event::Inclusion(ev) => {
+						Some(ev)
+					}
+					_ => None,
+				}
+			})
+		}
+
+		fn session_info(index: polkadot_primitives::v1::SessionIndex) -> Option<polkadot_primitives::v1::SessionInfo> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::session_info::<Runtime>(index)
+		}
+
+		fn dmq_contents(
+			recipient: polkadot_primitives::v1::Id,
+		) -> Vec<polkadot_primitives::v1::InboundDownwardMessage<BlockNumber>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::dmq_contents::<Runtime>(recipient)
+		}
+
+		fn inbound_hrmp_channels_contents(
+			recipient: polkadot_primitives::v1::Id
+		) -> BTreeMap<polkadot_primitives::v1::Id, Vec<polkadot_primitives::v1::InboundHrmpMessage<BlockNumber>>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::inbound_hrmp_channels_contents::<Runtime>(recipient)
+		}
+
+		fn validation_code_by_hash(
+			hash: polkadot_primitives::v1::ValidationCodeHash,
+		) -> Option<polkadot_primitives::v1::ValidationCode> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::validation_code_by_hash::<Runtime>(hash)
+		}
+
+		fn on_chain_votes() -> Option<polkadot_primitives::v1::ScrapedOnChainVotes<Hash>> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::on_chain_votes::<Runtime>()
+		}
+	}
+
+	impl sp_authority_discovery::AuthorityDiscoveryApi<Block> for Runtime {
+		fn authorities() -> Vec<AuthorityDiscoveryId> {
+			polkadot_runtime_parachains::runtime_api_impl::v1::relevant_authority_ids::<Runtime>()
 		}
 	}
 
@@ -735,6 +940,10 @@ impl_runtime_apis! {
 	}
 
 	impl fg_primitives::GrandpaApi<Block> for Runtime {
+		fn current_set_id() -> fg_primitives::SetId {
+			Grandpa::current_set_id()
+		}
+
 		fn grandpa_authorities() -> GrandpaAuthorityList {
 			Grandpa::grandpa_authorities()
 		}
@@ -781,20 +990,11 @@ impl_runtime_apis! {
 			begin: bp_messages::MessageNonce,
 			end: bp_messages::MessageNonce,
 		) -> Vec<bp_messages::MessageDetails<Balance>> {
-			(begin..=end).filter_map(|nonce| {
-				let message_data = BridgeMillauMessages::outbound_message_data(lane, nonce)?;
-				let decoded_payload = millau_messages::ToMillauMessagePayload::decode(
-					&mut &message_data.payload[..]
-				).ok()?;
-				Some(bp_messages::MessageDetails {
-					nonce,
-					dispatch_weight: decoded_payload.weight,
-					size: message_data.payload.len() as _,
-					delivery_and_dispatch_fee: message_data.fee,
-					dispatch_fee_payment: decoded_payload.dispatch_fee_payment,
-				})
-			})
-			.collect()
+			bridge_runtime_common::messages_api::outbound_message_details::<
+				Runtime,
+				WithMillauMessagesInstance,
+				WithMillauMessageBridge,
+			>(lane, begin, end)
 		}
 
 		fn latest_received_nonce(lane: bp_messages::LaneId) -> bp_messages::MessageNonce {
@@ -829,18 +1029,18 @@ impl_runtime_apis! {
 			use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList};
 			use frame_support::traits::StorageInfoTrait;
 
+			use pallet_bridge_currency_exchange::benchmarking::Pallet as BridgeCurrencyExchangeBench;
+			use pallet_bridge_messages::benchmarking::Pallet as MessagesBench;
+
+			let mut list = Vec::<BenchmarkList>::new();
+
+			list_benchmark!(list, extra, pallet_bridge_eth_poa, BridgeRialtoPoa);
 			list_benchmark!(
 				list,
 				extra,
-				pallet_bridge_currency_exchange,
-				BridgeCurrencyExchangeBench::<Runtime, KovanCurrencyExchange>
-			);
-			list_benchmark!(
-				list,
-				extra,
-				pallet_bridge_messages,
-				MessagesBench::<Runtime, WithMillauMessagesInstance>
+				pallet_bridge_currency_exchange, BridgeCurrencyExchangeBench::<Runtime, KovanCurrencyExchange>
 			);
+			list_benchmark!(list, extra, pallet_bridge_messages, MessagesBench::<Runtime, WithMillauMessagesInstance>);
 			list_benchmark!(list, extra, pallet_bridge_grandpa, BridgeMillauGrandpa);
 
 			let storage_info = AllPalletsWithSystem::storage_info();
@@ -981,7 +1181,7 @@ impl_runtime_apis! {
 						MessagesProofSize::Minimal(ref size) => vec![0u8; *size as _],
 						_ => vec![],
 					};
-					let call = Call::System(SystemCall::remark(remark));
+					let call = Call::System(SystemCall::remark { remark });
 					let call_weight = call.get_dispatch_info().weight;
 
 					let millau_account_id: bp_millau::AccountId = Default::default();
@@ -1001,14 +1201,12 @@ impl_runtime_apis! {
 						Self::endow_account(&rialto_public.clone().into_account());
 					}
 
-					let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key::<
-						<WithMillauMessageBridge as MessageBridge>::BridgedMessagesInstance,
-					>(
+					let make_millau_message_key = |message_key: MessageKey| storage_keys::message_key(
+						<WithMillauMessageBridge as MessageBridge>::BRIDGED_MESSAGES_PALLET_NAME,
 						&message_key.lane_id, message_key.nonce,
 					).0;
-					let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key::<
-						<WithMillauMessageBridge as MessageBridge>::BridgedMessagesInstance,
-					>(
+					let make_millau_outbound_lane_data_key = |lane_id| storage_keys::outbound_lane_data_key(
+						<WithMillauMessageBridge as MessageBridge>::BRIDGED_MESSAGES_PALLET_NAME,
 						&lane_id,
 					).0;
 
@@ -1054,9 +1252,8 @@ impl_runtime_apis! {
 
 					prepare_message_delivery_proof::<WithMillauMessageBridge, bp_millau::Hasher, Runtime, (), _, _>(
 						params,
-						|lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key::<
-							<WithMillauMessageBridge as MessageBridge>::BridgedMessagesInstance,
-						>(
+						|lane_id| pallet_bridge_messages::storage_keys::inbound_lane_data_key(
+							<WithMillauMessageBridge as MessageBridge>::BRIDGED_MESSAGES_PALLET_NAME,
 							&lane_id,
 						).0,
 						|state_root| bp_millau::Header::new(
@@ -1082,6 +1279,7 @@ impl_runtime_apis! {
 				}
 			}
 
+			add_benchmark!(params, batches, pallet_bridge_eth_poa, BridgeRialtoPoa);
 			add_benchmark!(
 				params,
 				batches,
@@ -1105,8 +1303,8 @@ impl_runtime_apis! {
 /// Millau account ownership digest from Rialto.
 ///
 /// The byte vector returned by this function should be signed with a Millau account private key.
-/// This way, the owner of `rialto_account_id` on Rialto proves that the Millau account private key
-/// is also under his control.
+/// This way, the owner of `rialto_account_id` on Rialto proves that the Millau account private
+/// key is also under their control.
 pub fn rialto_to_millau_account_ownership_digest<Call, AccountId, SpecVersion>(
 	millau_call: &Call,
 	rialto_account_id: AccountId,
@@ -1180,6 +1378,7 @@ mod tests {
 			bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT,
 			bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT,
 			bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT,
+			DbWeight::get(),
 		);
 
 		let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add(
@@ -1207,6 +1406,7 @@ mod tests {
 			max_incoming_inbound_lane_data_proof_size,
 			bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 			bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+			DbWeight::get(),
 		);
 	}
 
@@ -1253,4 +1453,10 @@ mod tests {
 			additional_amount
 		});
 	}
+
+	#[test]
+	fn call_size() {
+		const MAX_CALL_SIZE: usize = 230; // value from polkadot-runtime tests
+		assert!(core::mem::size_of::<Call>() <= MAX_CALL_SIZE);
+	}
 }
diff --git a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs
index bf97478a0aa204b34e594f6a0c829821f5a0126f..13a1c6b06ec21a215b9d16599540c816a1c8b23b 100644
--- a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs
+++ b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs
@@ -31,25 +31,34 @@ use frame_support::{
 	weights::{DispatchClass, Weight},
 	RuntimeDebug,
 };
-use sp_runtime::{traits::Zero, FixedPointNumber, FixedU128};
+use scale_info::TypeInfo;
+use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128};
 use sp_std::{convert::TryFrom, ops::RangeInclusive};
 
 /// Initial value of `MillauToRialtoConversionRate` parameter.
-pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
+pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 =
+	FixedU128::from_inner(FixedU128::DIV);
+/// Initial value of `MillauFeeMultiplier` parameter.
+pub const INITIAL_MILLAU_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV);
 
 parameter_types! {
 	/// Millau to Rialto conversion rate. Initially we treat both tokens as equal.
 	pub storage MillauToRialtoConversionRate: FixedU128 = INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE;
+	/// Fee multiplier value at Millau chain.
+	pub storage MillauFeeMultiplier: FixedU128 = INITIAL_MILLAU_FEE_MULTIPLIER;
 }
 
 /// Message payload for Rialto -> Millau messages.
-pub type ToMillauMessagePayload = messages::source::FromThisChainMessagePayload<WithMillauMessageBridge>;
+pub type ToMillauMessagePayload =
+	messages::source::FromThisChainMessagePayload<WithMillauMessageBridge>;
 
 /// Message verifier for Rialto -> Millau messages.
-pub type ToMillauMessageVerifier = messages::source::FromThisChainMessageVerifier<WithMillauMessageBridge>;
+pub type ToMillauMessageVerifier =
+	messages::source::FromThisChainMessageVerifier<WithMillauMessageBridge>;
 
 /// Message payload for Millau -> Rialto messages.
-pub type FromMillauMessagePayload = messages::target::FromBridgedChainMessagePayload<WithMillauMessageBridge>;
+pub type FromMillauMessagePayload =
+	messages::target::FromBridgedChainMessagePayload<WithMillauMessageBridge>;
 
 /// Encoded Rialto Call as it comes from Millau.
 pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall<crate::Call>;
@@ -59,14 +68,15 @@ pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDi
 	WithMillauMessageBridge,
 	crate::Runtime,
 	pallet_balances::Pallet<Runtime>,
-	pallet_bridge_dispatch::DefaultInstance,
+	(),
 >;
 
 /// Messages proof for Millau -> Rialto messages.
 pub type FromMillauMessagesProof = messages::target::FromBridgedChainMessagesProof<bp_millau::Hash>;
 
 /// Messages delivery proof for Rialto -> Millau messages.
-pub type ToMillauMessagesDeliveryProof = messages::source::FromBridgedChainMessagesDeliveryProof<bp_millau::Hash>;
+pub type ToMillauMessagesDeliveryProof =
+	messages::source::FromBridgedChainMessagesDeliveryProof<bp_millau::Hash>;
 
 /// Millau <-> Rialto message bridge.
 #[derive(RuntimeDebug, Clone, Copy)]
@@ -76,14 +86,16 @@ impl MessageBridge for WithMillauMessageBridge {
 	const RELAYER_FEE_PERCENT: u32 = 10;
 	const THIS_CHAIN_ID: ChainId = RIALTO_CHAIN_ID;
 	const BRIDGED_CHAIN_ID: ChainId = MILLAU_CHAIN_ID;
+	const BRIDGED_MESSAGES_PALLET_NAME: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME;
 
 	type ThisChain = Rialto;
 	type BridgedChain = Millau;
-	type BridgedMessagesInstance = crate::WithMillauMessagesInstance;
 
 	fn bridged_balance_to_this_balance(bridged_balance: bp_millau::Balance) -> bp_rialto::Balance {
-		bp_rialto::Balance::try_from(MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance))
-			.unwrap_or(bp_rialto::Balance::MAX)
+		bp_rialto::Balance::try_from(
+			MillauToRialtoConversionRate::get().saturating_mul_int(bridged_balance),
+		)
+		.unwrap_or(bp_rialto::Balance::MAX)
 	}
 }
 
@@ -128,11 +140,15 @@ impl messages::ThisChainWithMessages for Rialto {
 	}
 
 	fn transaction_payment(transaction: MessageTransaction<Weight>) -> bp_rialto::Balance {
+		// `transaction` may represent a transaction from the future, when the multiplier value
+		// will be larger, so let's use a slightly increased value
+		let multiplier = FixedU128::saturating_from_rational(110, 100)
+			.saturating_mul(pallet_transaction_payment::Pallet::<Runtime>::next_fee_multiplier());
 		// in our testnets, both per-byte fee and weight-to-fee are 1:1
 		messages::transaction_payment(
 			bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic,
 			1,
-			FixedU128::zero(),
+			multiplier,
 			|weight| weight as _,
 			transaction,
 		)
@@ -159,12 +175,15 @@ impl messages::BridgedChainWithMessages for Millau {
 
 	fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive<Weight> {
 		// we don't want to relay too large messages + keep reserve for future upgrades
-		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(bp_millau::max_extrinsic_weight());
+		let upper_limit = messages::target::maximal_incoming_message_dispatch_weight(
+			bp_millau::max_extrinsic_weight(),
+		);
 
-		// we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment` function
+		// we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment`
+		// function
 		//
-		// this bridge may be used to deliver all kind of messages, so we're not making any assumptions about
-		// minimal dispatch weight here
+		// this bridge may be used to deliver all kinds of messages, so we're not making any
+		// assumptions about the minimal dispatch weight here
 
 		0..=upper_limit
 	}
@@ -195,11 +214,14 @@ impl messages::BridgedChainWithMessages for Millau {
 	}
 
 	fn transaction_payment(transaction: MessageTransaction<Weight>) -> bp_millau::Balance {
+		// we don't have direct access to the value of the multiplier at the Millau chain
+		// => it is a messages module parameter
+		let multiplier = MillauFeeMultiplier::get();
 		// in our testnets, both per-byte fee and weight-to-fee are 1:1
 		messages::transaction_payment(
 			bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic,
 			1,
-			FixedU128::zero(),
+			multiplier,
 			|weight| weight as _,
 			transaction,
 		)
@@ -221,9 +243,11 @@ impl TargetHeaderChain<ToMillauMessagePayload, bp_millau::AccountId> for Millau
 	fn verify_messages_delivery_proof(
 		proof: Self::MessagesDeliveryProof,
 	) -> Result<(LaneId, InboundLaneData<bp_rialto::AccountId>), Self::Error> {
-		messages::source::verify_messages_delivery_proof::<WithMillauMessageBridge, Runtime, crate::MillauGrandpaInstance>(
-			proof,
-		)
+		messages::source::verify_messages_delivery_proof::<
+			WithMillauMessageBridge,
+			Runtime,
+			crate::MillauGrandpaInstance,
+		>(proof)
 	}
 }
 
@@ -240,15 +264,16 @@ impl SourceHeaderChain<bp_millau::Balance> for Millau {
 		proof: Self::MessagesProof,
 		messages_count: u32,
 	) -> Result<ProvedMessages<Message<bp_millau::Balance>>, Self::Error> {
-		messages::target::verify_messages_proof::<WithMillauMessageBridge, Runtime, crate::MillauGrandpaInstance>(
-			proof,
-			messages_count,
-		)
+		messages::target::verify_messages_proof::<
+			WithMillauMessageBridge,
+			Runtime,
+			crate::MillauGrandpaInstance,
+		>(proof, messages_count)
 	}
 }
 
 /// Rialto -> Millau message lane pallet parameters.
-#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq)]
+#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
 pub enum RialtoToMillauMessagesParameter {
 	/// The conversion formula we use is: `RialtoTokens = MillauTokens * conversion_rate`.
 	MillauToRialtoConversionRate(FixedU128),
@@ -257,9 +282,8 @@ pub enum RialtoToMillauMessagesParameter {
 impl MessagesParameter for RialtoToMillauMessagesParameter {
 	fn save(&self) {
 		match *self {
-			RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => {
-				MillauToRialtoConversionRate::set(conversion_rate)
-			}
+			RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) =>
+				MillauToRialtoConversionRate::set(conversion_rate),
 		}
 	}
 }
@@ -274,7 +298,9 @@ mod tests {
 		MessageKey,
 	};
 	use bp_runtime::{derive_account_id, messages::DispatchFeePayment, SourceAccount};
-	use bridge_runtime_common::messages::target::{FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload};
+	use bridge_runtime_common::messages::target::{
+		FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload,
+	};
 	use frame_support::{
 		traits::Currency,
 		weights::{GetDispatchInfo, WeightToFeePolynomial},
@@ -286,12 +312,15 @@ mod tests {
 		// this test actually belongs to the `bridge-runtime-common` crate, but there we have no
 		// mock runtime. Making another one there just for this test, given that both crates
 		// live n single repo is an overkill
-		let mut ext: sp_io::TestExternalities = SystemConfig::default().build_storage::<Runtime>().unwrap().into();
+		let mut ext: sp_io::TestExternalities =
+			SystemConfig::default().build_storage::<Runtime>().unwrap().into();
 		ext.execute_with(|| {
 			let bridge = MILLAU_CHAIN_ID;
-			let call: Call = SystemCall::remark(vec![]).into();
+			let call: Call = SystemCall::remark { remark: vec![] }.into();
 			let dispatch_weight = call.get_dispatch_info().weight;
-			let dispatch_fee = <Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(&dispatch_weight);
+			let dispatch_fee = <Runtime as pallet_transaction_payment::Config>::WeightToFee::calc(
+				&dispatch_weight,
+			);
 			assert!(dispatch_fee > 0);
 
 			// create relayer account with minimal balance
@@ -303,12 +332,13 @@ mod tests {
 			);
 
 			// create dispatch account with minimal balance + dispatch fee
-			let dispatch_account = derive_account_id::<<Runtime as pallet_bridge_dispatch::Config>::SourceChainAccountId>(
-				bridge,
-				SourceAccount::Root,
-			);
+			let dispatch_account = derive_account_id::<
+				<Runtime as pallet_bridge_dispatch::Config>::SourceChainAccountId,
+			>(bridge, SourceAccount::Root);
 			let dispatch_account =
-				<Runtime as pallet_bridge_dispatch::Config>::AccountIdConverter::convert(dispatch_account);
+				<Runtime as pallet_bridge_dispatch::Config>::AccountIdConverter::convert(
+					dispatch_account,
+				);
 			let _ = <pallet_balances::Pallet<Runtime> as Currency<AccountId>>::deposit_creating(
 				&dispatch_account,
 				initial_amount + dispatch_fee,
@@ -318,10 +348,7 @@ mod tests {
 			FromMillauMessageDispatch::dispatch(
 				&relayer_account,
 				DispatchMessage {
-					key: MessageKey {
-						lane_id: Default::default(),
-						nonce: 0,
-					},
+					key: MessageKey { lane_id: Default::default(), nonce: 0 },
 					data: DispatchMessageData {
 						payload: Ok(FromBridgedChainMessagePayload::<WithMillauMessageBridge> {
 							spec_version: VERSION.spec_version,
@@ -337,11 +364,15 @@ mod tests {
 
 			// ensure that fee has been transferred from dispatch to relayer account
 			assert_eq!(
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&relayer_account),
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&relayer_account
+				),
 				initial_amount + dispatch_fee,
 			);
 			assert_eq!(
-				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(&dispatch_account),
+				<pallet_balances::Pallet<Runtime> as Currency<AccountId>>::free_balance(
+					&dispatch_account
+				),
 				initial_amount,
 			);
 		});
diff --git a/polkadot/bridges/bin/rialto/runtime/src/parachains.rs b/polkadot/bridges/bin/rialto/runtime/src/parachains.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9a2f85460153cfa07bb0289bb217f9be245bc4a3
--- /dev/null
+++ b/polkadot/bridges/bin/rialto/runtime/src/parachains.rs
@@ -0,0 +1,158 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Parachains support in Rialto runtime.
+
+use crate::{AccountId, Balance, Balances, BlockNumber, Event, Origin, Registrar, Runtime, Slots};
+
+use frame_support::{parameter_types, weights::Weight};
+use frame_system::EnsureRoot;
+use polkadot_primitives::v1::ValidatorIndex;
+use polkadot_runtime_common::{paras_registrar, paras_sudo_wrapper, slots};
+use polkadot_runtime_parachains::{
+	configuration as parachains_configuration, dmp as parachains_dmp, hrmp as parachains_hrmp,
+	inclusion as parachains_inclusion, initializer as parachains_initializer,
+	origin as parachains_origin, paras as parachains_paras,
+	paras_inherent as parachains_paras_inherent, scheduler as parachains_scheduler,
+	session_info as parachains_session_info, shared as parachains_shared, ump as parachains_ump,
+};
+
+/// Special `RewardValidators` that does nothing ;)
+pub struct RewardValidators;
+impl polkadot_runtime_parachains::inclusion::RewardValidators for RewardValidators {
+	fn reward_backing(_: impl IntoIterator<Item = ValidatorIndex>) {}
+	fn reward_bitfields(_: impl IntoIterator<Item = ValidatorIndex>) {}
+}
+
+// all required parachain modules from `polkadot-runtime-parachains` crate
+
+impl parachains_configuration::Config for Runtime {
+	type WeightInfo = parachains_configuration::TestWeightInfo;
+}
+
+impl parachains_dmp::Config for Runtime {}
+
+impl parachains_hrmp::Config for Runtime {
+	type Event = Event;
+	type Origin = Origin;
+	type Currency = Balances;
+}
+
+impl parachains_inclusion::Config for Runtime {
+	type Event = Event;
+	type RewardValidators = RewardValidators;
+	type DisputesHandler = ();
+}
+
+impl parachains_initializer::Config for Runtime {
+	type Randomness = pallet_babe::RandomnessFromOneEpochAgo<Runtime>;
+	type ForceOrigin = EnsureRoot<AccountId>;
+	type WeightInfo = ();
+}
+
+impl parachains_origin::Config for Runtime {}
+
+impl parachains_paras::Config for Runtime {
+	type Origin = Origin;
+	type Event = Event;
+	type WeightInfo = parachains_paras::TestWeightInfo;
+}
+
+impl parachains_paras_inherent::Config for Runtime {}
+
+impl parachains_scheduler::Config for Runtime {}
+
+impl parachains_session_info::Config for Runtime {}
+
+impl parachains_shared::Config for Runtime {}
+
+parameter_types! {
+	pub const FirstMessageFactorPercent: u64 = 100;
+}
+
+impl parachains_ump::Config for Runtime {
+	type Event = Event;
+	type UmpSink = ();
+	type FirstMessageFactorPercent = FirstMessageFactorPercent;
+	type ExecuteOverweightOrigin = EnsureRoot<AccountId>;
+}
+
+// required onboarding pallets. We're not going to use auctions or crowdloans, so they're missing
+
+parameter_types! {
+	pub const ParaDeposit: Balance = 0;
+	pub const DataDepositPerByte: Balance = 0;
+}
+
+impl paras_registrar::Config for Runtime {
+	type Event = Event;
+	type Origin = Origin;
+	type Currency = Balances;
+	type OnSwap = Slots;
+	type ParaDeposit = ParaDeposit;
+	type DataDepositPerByte = DataDepositPerByte;
+	type WeightInfo = paras_registrar::TestWeightInfo;
+}
+
+parameter_types! {
+	pub const LeasePeriod: BlockNumber = 10 * bp_rialto::MINUTES;
+}
+
+impl slots::Config for Runtime {
+	type Event = Event;
+	type Currency = Balances;
+	type Registrar = Registrar;
+	type LeasePeriod = LeasePeriod;
+	type WeightInfo = slots::TestWeightInfo;
+	type LeaseOffset = ();
+}
+
+impl paras_sudo_wrapper::Config for Runtime {}
+
+pub struct ZeroWeights;
+
+impl polkadot_runtime_common::paras_registrar::WeightInfo for ZeroWeights {
+	fn reserve() -> Weight {
+		0
+	}
+	fn register() -> Weight {
+		0
+	}
+	fn force_register() -> Weight {
+		0
+	}
+	fn deregister() -> Weight {
+		0
+	}
+	fn swap() -> Weight {
+		0
+	}
+}
+
+impl polkadot_runtime_common::slots::WeightInfo for ZeroWeights {
+	fn force_lease() -> Weight {
+		0
+	}
+	fn manage_lease_period_start(_c: u32, _t: u32) -> Weight {
+		0
+	}
+	fn clear_all_leases() -> Weight {
+		0
+	}
+	fn trigger_onboard() -> Weight {
+		0
+	}
+}
diff --git a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs b/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs
index 77bd288e864812a755c11d533a798a20286d9647..ea5d72f4a69eaedd8b958a8f373a64f264286103 100644
--- a/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs
+++ b/polkadot/bridges/bin/rialto/runtime/src/rialto_poa.rs
@@ -23,8 +23,8 @@ use bp_header_chain::InclusionProofVerifier;
 use frame_support::RuntimeDebug;
 use hex_literal::hex;
 use pallet_bridge_eth_poa::{
-	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy, ValidatorsConfiguration,
-	ValidatorsSource,
+	AuraConfiguration, ChainTime as TChainTime, PruningStrategy as TPruningStrategy,
+	ValidatorsConfiguration, ValidatorsSource,
 };
 use sp_std::prelude::*;
 
@@ -79,11 +79,14 @@ pub fn genesis_header() -> AuraHeader {
 		timestamp: 0,
 		number: 0,
 		author: Default::default(),
-		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
-		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").into(),
+		transactions_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
+		uncles_hash: hex!("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
+			.into(),
 		extra_data: vec![],
 		state_root: hex!("a992d04c791620ed7ed96555a80cf0568355bb4bee2656f46899a4372f25f248").into(),
-		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").into(),
+		receipts_root: hex!("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+			.into(),
 		log_bloom: Default::default(),
 		gas_used: Default::default(),
 		gas_limit: 0x222222.into(),
@@ -128,12 +131,17 @@ impl InclusionProofVerifier for RialtoBlockchain {
 	type Transaction = RawTransaction;
 	type TransactionInclusionProof = EthereumTransactionInclusionProof;
 
-	fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<Self::Transaction> {
-		let is_transaction_finalized =
-			crate::BridgeRialtoPoa::verify_transaction_finalized(proof.block, proof.index, &proof.proof);
+	fn verify_transaction_inclusion_proof(
+		proof: &Self::TransactionInclusionProof,
+	) -> Option<Self::Transaction> {
+		let is_transaction_finalized = crate::BridgeRialtoPoa::verify_transaction_finalized(
+			proof.block,
+			proof.index,
+			&proof.proof,
+		);
 
 		if !is_transaction_finalized {
-			return None;
+			return None
 		}
 
 		proof.proof.get(proof.index as usize).map(|(tx, _)| tx.clone())
diff --git a/polkadot/bridges/bin/runtime-common/Cargo.toml b/polkadot/bridges/bin/runtime-common/Cargo.toml
index 928523af0256a87b626cf934b6d6770ea376994d..4e693f05451c97de2d7452c9e3137976393dc0f6 100644
--- a/polkadot/bridges/bin/runtime-common/Cargo.toml
+++ b/polkadot/bridges/bin/runtime-common/Cargo.toml
@@ -8,7 +8,7 @@ repository = "https://github.com/paritytech/parity-bridges-common/"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive"] }
 ed25519-dalek = { version = "1.0", default-features = false, optional = true }
 hash-db = { version = "0.15.2", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
@@ -24,13 +24,13 @@ pallet-bridge-messages = { path = "../../modules/messages", default-features = f
 
 # Substrate dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/polkadot/bridges/bin/runtime-common/src/lib.rs b/polkadot/bridges/bin/runtime-common/src/lib.rs
index ae7efb4a4196866f61f94a8e627f603f1afc7fb6..66f2c6c3a01f1e8178a73ecdbc67404d90db6ddf 100644
--- a/polkadot/bridges/bin/runtime-common/src/lib.rs
+++ b/polkadot/bridges/bin/runtime-common/src/lib.rs
@@ -19,4 +19,5 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 pub mod messages;
+pub mod messages_api;
 pub mod messages_benchmarking;
diff --git a/polkadot/bridges/bin/runtime-common/src/messages.rs b/polkadot/bridges/bin/runtime-common/src/messages.rs
index 08f766e2368642b2e4c4255512a665987299b003..b34cbb85540d49e2169655d324a0aa614c1e6ad4 100644
--- a/polkadot/bridges/bin/runtime-common/src/messages.rs
+++ b/polkadot/bridges/bin/runtime-common/src/messages.rs
@@ -32,17 +32,20 @@ use bp_runtime::{
 };
 use codec::{Decode, Encode};
 use frame_support::{
-	traits::{Currency, ExistenceRequirement, Instance},
+	traits::{Currency, ExistenceRequirement},
 	weights::{Weight, WeightToFeePolynomial},
 	RuntimeDebug,
 };
 use hash_db::Hasher;
 use scale_info::TypeInfo;
 use sp_runtime::{
-	traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul},
+	traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul, Saturating, Zero},
 	FixedPointNumber, FixedPointOperand, FixedU128,
 };
-use sp_std::{cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec};
+use sp_std::{
+	cmp::PartialOrd, convert::TryFrom, fmt::Debug, marker::PhantomData, ops::RangeInclusive,
+	vec::Vec,
+};
 use sp_trie::StorageProof;
 
 /// Bidirectional message bridge.
@@ -54,16 +57,20 @@ pub trait MessageBridge {
 	const THIS_CHAIN_ID: ChainId;
 	/// Identifier of the Bridged chain.
 	const BRIDGED_CHAIN_ID: ChainId;
+	/// Name of the paired messages pallet instance at the Bridged chain.
+	///
+	/// Should be the name that is used in the `construct_runtime!()` macro.
+	const BRIDGED_MESSAGES_PALLET_NAME: &'static str;
 
 	/// This chain in context of message bridge.
 	type ThisChain: ThisChainWithMessages;
 	/// Bridged chain in context of message bridge.
 	type BridgedChain: BridgedChainWithMessages;
-	/// Instance of the `pallet-bridge-messages` pallet at the Bridged chain.
-	type BridgedMessagesInstance: Instance;
 
 	/// Convert Bridged chain balance into This chain balance.
-	fn bridged_balance_to_this_balance(bridged_balance: BalanceOf<BridgedChain<Self>>) -> BalanceOf<ThisChain<Self>>;
+	fn bridged_balance_to_this_balance(
+		bridged_balance: BalanceOf<BridgedChain<Self>>,
+	) -> BalanceOf<ThisChain<Self>>;
 }
 
 /// Chain that has `pallet-bridge-messages` and `dispatch` modules.
@@ -73,16 +80,23 @@ pub trait ChainWithMessages {
 	/// Accound id on the chain.
 	type AccountId: Encode + Decode;
 	/// Public key of the chain account that may be used to verify signatures.
-	type Signer: Decode;
+	type Signer: Encode + Decode;
 	/// Signature type used on the chain.
-	type Signature: Decode;
+	type Signature: Encode + Decode;
 	/// Type of weight that is used on the chain. This would almost always be a regular
 	/// `frame_support::weight::Weight`. But since the meaning of weight on different chains
 	/// may be different, the `WeightOf<>` construct is used to avoid confusion between
 	/// different weights.
 	type Weight: From<frame_support::weights::Weight> + PartialOrd;
 	/// Type of balances that is used on the chain.
-	type Balance: Encode + Decode + CheckedAdd + CheckedDiv + CheckedMul + PartialOrd + From<u32> + Copy;
+	type Balance: Encode
+		+ Decode
+		+ CheckedAdd
+		+ CheckedDiv
+		+ CheckedMul
+		+ PartialOrd
+		+ From<u32>
+		+ Copy;
 }
 
 /// Message related transaction parameters estimation.
@@ -137,30 +151,40 @@ pub trait BridgedChainWithMessages: ChainWithMessages {
 		message_dispatch_weight: WeightOf<Self>,
 	) -> MessageTransaction<WeightOf<Self>>;
 
-	/// Returns minimal transaction fee that must be paid for given transaction at the Bridged chain.
+	/// Returns minimal transaction fee that must be paid for given transaction at the Bridged
+	/// chain.
 	fn transaction_payment(transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self>;
 }
 
-pub(crate) type ThisChain<B> = <B as MessageBridge>::ThisChain;
-pub(crate) type BridgedChain<B> = <B as MessageBridge>::BridgedChain;
-pub(crate) type HashOf<C> = <C as ChainWithMessages>::Hash;
-pub(crate) type AccountIdOf<C> = <C as ChainWithMessages>::AccountId;
-pub(crate) type SignerOf<C> = <C as ChainWithMessages>::Signer;
-pub(crate) type SignatureOf<C> = <C as ChainWithMessages>::Signature;
-pub(crate) type WeightOf<C> = <C as ChainWithMessages>::Weight;
-pub(crate) type BalanceOf<C> = <C as ChainWithMessages>::Balance;
-
-pub(crate) type CallOf<C> = <C as ThisChainWithMessages>::Call;
+/// This chain in context of message bridge.
+pub type ThisChain<B> = <B as MessageBridge>::ThisChain;
+/// Bridged chain in context of message bridge.
+pub type BridgedChain<B> = <B as MessageBridge>::BridgedChain;
+/// Hash used on the chain.
+pub type HashOf<C> = <C as ChainWithMessages>::Hash;
+/// Account id used on the chain.
+pub type AccountIdOf<C> = <C as ChainWithMessages>::AccountId;
+/// Public key of the chain account that may be used to verify signature.
+pub type SignerOf<C> = <C as ChainWithMessages>::Signer;
+/// Signature type used on the chain.
+pub type SignatureOf<C> = <C as ChainWithMessages>::Signature;
+/// Type of weight that used on the chain.
+pub type WeightOf<C> = <C as ChainWithMessages>::Weight;
+/// Type of balances that is used on the chain.
+pub type BalanceOf<C> = <C as ChainWithMessages>::Balance;
+/// Type of call that is used on this chain.
+pub type CallOf<C> = <C as ThisChainWithMessages>::Call;
 
 /// Raw storage proof type (just raw trie nodes).
 type RawStorageProof = Vec<Vec<u8>>;
 
 /// Compute fee of transaction at runtime where regular transaction payment pallet is being used.
 ///
-/// The value of `multiplier` parameter is the expected value of `pallet_transaction_payment::NextFeeMultiplier`
-/// at the moment when transaction is submitted. If you're charging this payment in advance (and that's what
-/// happens with delivery and confirmation transaction in this crate), then there's a chance that the actual
-/// fee will be larger than what is paid in advance. So the value must be chosen carefully.
+/// The value of `multiplier` parameter is the expected value of
+/// `pallet_transaction_payment::NextFeeMultiplier` at the moment when transaction is submitted. If
+/// you're charging this payment in advance (and that's what happens with delivery and confirmation
+/// transaction in this crate), then there's a chance that the actual fee will be larger than what
+/// is paid in advance. So the value must be chosen carefully.
 pub fn transaction_payment<Balance: AtLeast32BitUnsigned + FixedPointOperand>(
 	base_extrinsic_weight: Weight,
 	per_byte_fee: Balance,
@@ -223,7 +247,8 @@ pub mod source {
 	}
 
 	/// 'Parsed' message delivery proof - inbound lane id and its state.
-	pub type ParsedMessagesDeliveryProofFromBridgedChain<B> = (LaneId, InboundLaneData<AccountIdOf<ThisChain<B>>>);
+	pub type ParsedMessagesDeliveryProofFromBridgedChain<B> =
+		(LaneId, InboundLaneData<AccountIdOf<ThisChain<B>>>);
 
 	/// Message verifier that is doing all basic checks.
 	///
@@ -235,19 +260,30 @@ pub mod source {
 	/// Following checks are made:
 	///
 	/// - message is rejected if its lane is currently blocked;
-	/// - message is rejected if there are too many pending (undelivered) messages at the outbound lane;
-	/// - check that the sender has rights to dispatch the call on target chain using provided dispatch origin;
+	/// - message is rejected if there are too many pending (undelivered) messages at the outbound
+	///   lane;
+	/// - check that the sender has rights to dispatch the call on target chain using provided
+	///   dispatch origin;
 	/// - check that the sender has paid enough funds for both message delivery and dispatch.
 	#[derive(RuntimeDebug)]
 	pub struct FromThisChainMessageVerifier<B>(PhantomData<B>);
 
-	pub(crate) const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled.";
-	pub(crate) const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane.";
-	pub(crate) const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin.";
-	pub(crate) const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane.";
-
-	impl<B> LaneMessageVerifier<AccountIdOf<ThisChain<B>>, FromThisChainMessagePayload<B>, BalanceOf<ThisChain<B>>>
-		for FromThisChainMessageVerifier<B>
+	/// The error message returned from LaneMessageVerifier when outbound lane is disabled.
+	pub const OUTBOUND_LANE_DISABLED: &str = "The outbound message lane is disabled.";
+	/// The error message returned from LaneMessageVerifier when too many pending messages at the
+	/// lane.
+	pub const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane.";
+	/// The error message returned from LaneMessageVerifier when call origin is mismatch.
+	pub const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin.";
+	/// The error message returned from LaneMessageVerifier when the message fee is too low.
+	pub const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane.";
+
+	impl<B>
+		LaneMessageVerifier<
+			AccountIdOf<ThisChain<B>>,
+			FromThisChainMessagePayload<B>,
+			BalanceOf<ThisChain<B>>,
+		> for FromThisChainMessageVerifier<B>
 	where
 		B: MessageBridge,
 		AccountIdOf<ThisChain<B>>: PartialEq + Clone,
@@ -263,7 +299,7 @@ pub mod source {
 		) -> Result<(), Self::Error> {
 			// reject message if lane is blocked
 			if !ThisChain::<B>::is_outbound_lane_enabled(lane) {
-				return Err(OUTBOUND_LANE_DISABLED);
+				return Err(OUTBOUND_LANE_DISABLED)
 			}
 
 			// reject message if there are too many pending messages at this lane
@@ -272,19 +308,20 @@ pub mod source {
 				.latest_generated_nonce
 				.saturating_sub(lane_outbound_data.latest_received_nonce);
 			if pending_messages > max_pending_messages {
-				return Err(TOO_MANY_PENDING_MESSAGES);
+				return Err(TOO_MANY_PENDING_MESSAGES)
 			}
 
 			// Do the dispatch-specific check. We assume that the target chain uses
 			// `Dispatch`, so we verify the message accordingly.
-			pallet_bridge_dispatch::verify_message_origin(submitter, payload).map_err(|_| BAD_ORIGIN)?;
+			pallet_bridge_dispatch::verify_message_origin(submitter, payload)
+				.map_err(|_| BAD_ORIGIN)?;
 
 			let minimal_fee_in_this_tokens =
 				estimate_message_dispatch_and_delivery_fee::<B>(payload, B::RELAYER_FEE_PERCENT)?;
 
 			// compare with actual fee paid
 			if *delivery_and_dispatch_fee < minimal_fee_in_this_tokens {
-				return Err(TOO_LOW_FEE);
+				return Err(TOO_LOW_FEE)
 			}
 
 			Ok(())
@@ -306,13 +343,13 @@ pub mod source {
 	) -> Result<(), &'static str> {
 		let weight_limits = BridgedChain::<B>::message_weight_limits(&payload.call);
 		if !weight_limits.contains(&payload.weight.into()) {
-			return Err("Incorrect message weight declared");
+			return Err("Incorrect message weight declared")
 		}
 
 		// The maximal size of extrinsic at Substrate-based chain depends on the
-		// `frame_system::Config::MaximumBlockLength` and `frame_system::Config::AvailableBlockRatio`
-		// constants. This check is here to be sure that the lane won't stuck because message is too
-		// large to fit into delivery transaction.
+		// `frame_system::Config::MaximumBlockLength` and
+		// `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that
+		// the lane won't stuck because message is too large to fit into delivery transaction.
 		//
 		// **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not
 		// the message itself. The proof is always larger than the message. But unless chain state
@@ -320,16 +357,17 @@ pub mod source {
 		// transaction also contains signatures and signed extensions. Because of this, we reserve
 		// 1/3 of the the maximal extrinsic weight for this data.
 		if payload.call.len() > maximal_message_size::<B>() as usize {
-			return Err("The message is too large to be sent over the lane");
+			return Err("The message is too large to be sent over the lane")
 		}
 
 		Ok(())
 	}
 
-	/// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged chain.
+	/// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged
+	/// chain.
 	///
-	/// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional conversions.
-	/// Returns `None` if overflow has happened.
+	/// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional
+	/// conversions. Returns `None` if overflow has happened.
 	pub fn estimate_message_dispatch_and_delivery_fee<B: MessageBridge>(
 		payload: &FromThisChainMessagePayload<B>,
 		relayer_fee_percent: u32,
@@ -338,25 +376,23 @@ pub mod source {
 		//
 		// if we're going to pay dispatch fee at the target chain, then we don't include weight
 		// of the message dispatch in the delivery transaction cost
-		let pay_dispatch_fee_at_target_chain = payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
+		let pay_dispatch_fee_at_target_chain =
+			payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
 		let delivery_transaction = BridgedChain::<B>::estimate_delivery_transaction(
-			&payload.call,
+			&payload.encode(),
 			pay_dispatch_fee_at_target_chain,
-			if pay_dispatch_fee_at_target_chain {
-				0.into()
-			} else {
-				payload.weight.into()
-			},
+			if pay_dispatch_fee_at_target_chain { 0.into() } else { payload.weight.into() },
 		);
 		let delivery_transaction_fee = BridgedChain::<B>::transaction_payment(delivery_transaction);
 
 		// the fee (in This tokens) of all transactions that are made on This chain
 		let confirmation_transaction = ThisChain::<B>::estimate_delivery_confirmation_transaction();
-		let confirmation_transaction_fee = ThisChain::<B>::transaction_payment(confirmation_transaction);
+		let confirmation_transaction_fee =
+			ThisChain::<B>::transaction_payment(confirmation_transaction);
 
 		// minimal fee (in This tokens) is a sum of all required fees
-		let minimal_fee =
-			B::bridged_balance_to_this_balance(delivery_transaction_fee).checked_add(&confirmation_transaction_fee);
+		let minimal_fee = B::bridged_balance_to_this_balance(delivery_transaction_fee)
+			.checked_add(&confirmation_transaction_fee);
 
 		// before returning, add extra fee that is paid to the relayer (relayer interest)
 		minimal_fee
@@ -377,14 +413,14 @@ pub mod source {
 	) -> Result<ParsedMessagesDeliveryProofFromBridgedChain<B>, &'static str>
 	where
 		ThisRuntime: pallet_bridge_grandpa::Config<GrandpaInstance>,
-		HashOf<BridgedChain<B>>:
-			Into<bp_runtime::HashOf<<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain>>,
+		HashOf<BridgedChain<B>>: Into<
+			bp_runtime::HashOf<
+				<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain,
+			>,
+		>,
 	{
-		let FromBridgedChainMessagesDeliveryProof {
-			bridged_header_hash,
-			storage_proof,
-			lane,
-		} = proof;
+		let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } =
+			proof;
 		pallet_bridge_grandpa::Pallet::<ThisRuntime, GrandpaInstance>::parse_finalized_storage_proof(
 			bridged_header_hash.into(),
 			StorageProof::new(storage_proof),
@@ -392,7 +428,7 @@ pub mod source {
 				// Messages delivery proof is just proof of single storage key read => any error
 				// is fatal.
 				let storage_inbound_lane_data_key =
-					pallet_bridge_messages::storage_keys::inbound_lane_data_key::<B::BridgedMessagesInstance>(&lane);
+					pallet_bridge_messages::storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane);
 				let raw_inbound_lane_data = storage
 					.read_value(storage_inbound_lane_data_key.0.as_ref())
 					.map_err(|_| "Failed to read inbound lane state from storage proof")?
@@ -469,14 +505,13 @@ pub mod target {
 	impl<DecodedCall> FromBridgedChainEncodedMessageCall<DecodedCall> {
 		/// Create encoded call.
 		pub fn new(encoded_call: Vec<u8>) -> Self {
-			FromBridgedChainEncodedMessageCall {
-				encoded_call,
-				_marker: PhantomData::default(),
-			}
+			FromBridgedChainEncodedMessageCall { encoded_call, _marker: PhantomData::default() }
 		}
 	}
 
-	impl<DecodedCall: Decode> From<FromBridgedChainEncodedMessageCall<DecodedCall>> for Result<DecodedCall, ()> {
+	impl<DecodedCall: Decode> From<FromBridgedChainEncodedMessageCall<DecodedCall>>
+		for Result<DecodedCall, ()>
+	{
 		fn from(encoded_call: FromBridgedChainEncodedMessageCall<DecodedCall>) -> Self {
 			DecodedCall::decode(&mut &encoded_call.encoded_call[..]).map_err(drop)
 		}
@@ -492,20 +527,24 @@ pub mod target {
 		MessageDispatch<AccountIdOf<ThisChain<B>>, BalanceOf<BridgedChain<B>>>
 		for FromBridgedChainMessageDispatch<B, ThisRuntime, ThisCurrency, ThisDispatchInstance>
 	where
-		ThisDispatchInstance: frame_support::traits::Instance,
-		ThisRuntime: pallet_bridge_dispatch::Config<ThisDispatchInstance, MessageId = (LaneId, MessageNonce)>
-			+ pallet_transaction_payment::Config,
+		BalanceOf<ThisChain<B>>: Saturating + FixedPointOperand,
+		ThisDispatchInstance: 'static,
+		ThisRuntime: pallet_bridge_dispatch::Config<
+				ThisDispatchInstance,
+				BridgeMessageId = (LaneId, MessageNonce),
+			> + pallet_transaction_payment::Config,
 		<ThisRuntime as pallet_transaction_payment::Config>::OnChargeTransaction:
-			pallet_transaction_payment::OnChargeTransaction<ThisRuntime, Balance = BalanceOf<ThisChain<B>>>,
+			pallet_transaction_payment::OnChargeTransaction<
+				ThisRuntime,
+				Balance = BalanceOf<ThisChain<B>>,
+			>,
 		ThisCurrency: Currency<AccountIdOf<ThisChain<B>>, Balance = BalanceOf<ThisChain<B>>>,
-		<ThisRuntime as pallet_bridge_dispatch::Config<ThisDispatchInstance>>::Event: From<
-			pallet_bridge_dispatch::RawEvent<(LaneId, MessageNonce), AccountIdOf<ThisChain<B>>, ThisDispatchInstance>,
-		>,
-		pallet_bridge_dispatch::Pallet<ThisRuntime, ThisDispatchInstance>: bp_message_dispatch::MessageDispatch<
-			AccountIdOf<ThisChain<B>>,
-			(LaneId, MessageNonce),
-			Message = FromBridgedChainMessagePayload<B>,
-		>,
+		pallet_bridge_dispatch::Pallet<ThisRuntime, ThisDispatchInstance>:
+			bp_message_dispatch::MessageDispatch<
+				AccountIdOf<ThisChain<B>>,
+				(LaneId, MessageNonce),
+				Message = FromBridgedChainMessagePayload<B>,
+			>,
 	{
 		type DispatchPayload = FromBridgedChainMessagePayload<B>;
 
@@ -526,13 +565,22 @@ pub mod target {
 				message_id,
 				message.data.payload.map_err(drop),
 				|dispatch_origin, dispatch_weight| {
-					ThisCurrency::transfer(
-						dispatch_origin,
-						relayer_account,
-						ThisRuntime::WeightToFee::calc(&dispatch_weight),
-						ExistenceRequirement::AllowDeath,
-					)
-					.map_err(drop)
+					let unadjusted_weight_fee = ThisRuntime::WeightToFee::calc(&dispatch_weight);
+					let fee_multiplier =
+						pallet_transaction_payment::Pallet::<ThisRuntime>::next_fee_multiplier();
+					let adjusted_weight_fee =
+						fee_multiplier.saturating_mul_int(unadjusted_weight_fee);
+					if !adjusted_weight_fee.is_zero() {
+						ThisCurrency::transfer(
+							dispatch_origin,
+							relayer_account,
+							adjusted_weight_fee,
+							ExistenceRequirement::AllowDeath,
+						)
+						.map_err(drop)
+					} else {
+						Ok(())
+					}
 				},
 			)
 		}
@@ -559,9 +607,11 @@ pub mod target {
 	) -> Result<ProvedMessages<Message<BalanceOf<BridgedChain<B>>>>, &'static str>
 	where
 		ThisRuntime: pallet_bridge_grandpa::Config<GrandpaInstance>,
-		ThisRuntime: pallet_bridge_messages::Config<B::BridgedMessagesInstance>,
-		HashOf<BridgedChain<B>>:
-			Into<bp_runtime::HashOf<<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain>>,
+		HashOf<BridgedChain<B>>: Into<
+			bp_runtime::HashOf<
+				<ThisRuntime as pallet_bridge_grandpa::Config<GrandpaInstance>>::BridgedChain,
+			>,
+		>,
 	{
 		verify_messages_proof_with_parser::<B, _, _>(
 			proof,
@@ -596,12 +646,13 @@ pub mod target {
 		fn from(err: MessageProofError) -> &'static str {
 			match err {
 				MessageProofError::Empty => "Messages proof is empty",
-				MessageProofError::MessagesCountMismatch => "Declared messages count doesn't match actual value",
+				MessageProofError::MessagesCountMismatch =>
+					"Declared messages count doesn't match actual value",
 				MessageProofError::MissingRequiredMessage => "Message is missing from the proof",
-				MessageProofError::FailedToDecodeMessage => "Failed to decode message from the proof",
-				MessageProofError::FailedToDecodeOutboundLaneState => {
-					"Failed to decode outbound lane data from the proof"
-				}
+				MessageProofError::FailedToDecodeMessage =>
+					"Failed to decode message from the proof",
+				MessageProofError::FailedToDecodeOutboundLaneState =>
+					"Failed to decode outbound lane data from the proof",
 				MessageProofError::Custom(err) => err,
 			}
 		}
@@ -624,14 +675,16 @@ pub mod target {
 	{
 		fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option<Vec<u8>> {
 			let storage_outbound_lane_data_key =
-				pallet_bridge_messages::storage_keys::outbound_lane_data_key::<B::BridgedMessagesInstance>(lane_id);
-			self.storage
-				.read_value(storage_outbound_lane_data_key.0.as_ref())
-				.ok()?
+				pallet_bridge_messages::storage_keys::outbound_lane_data_key(
+					B::BRIDGED_MESSAGES_PALLET_NAME,
+					lane_id,
+				);
+			self.storage.read_value(storage_outbound_lane_data_key.0.as_ref()).ok()?
 		}
 
 		fn read_raw_message(&self, message_key: &MessageKey) -> Option<Vec<u8>> {
-			let storage_message_key = pallet_bridge_messages::storage_keys::message_key::<B::BridgedMessagesInstance>(
+			let storage_message_key = pallet_bridge_messages::storage_keys::message_key(
+				B::BRIDGED_MESSAGES_PALLET_NAME,
 				&message_key.lane_id,
 				message_key.nonce,
 			);
@@ -646,7 +699,8 @@ pub mod target {
 		build_parser: BuildParser,
 	) -> Result<ProvedMessages<Message<BalanceOf<BridgedChain<B>>>>, MessageProofError>
 	where
-		BuildParser: FnOnce(HashOf<BridgedChain<B>>, RawStorageProof) -> Result<Parser, MessageProofError>,
+		BuildParser:
+			FnOnce(HashOf<BridgedChain<B>>, RawStorageProof) -> Result<Parser, MessageProofError>,
 		Parser: MessageProofParser,
 	{
 		let FromBridgedChainMessagesProof {
@@ -658,18 +712,19 @@ pub mod target {
 		} = proof;
 
 		// receiving proofs where end < begin is ok (if proof includes outbound lane state)
-		let messages_in_the_proof = if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) {
-			// let's check that the user (relayer) has passed correct `messages_count`
-			// (this bounds maximal capacity of messages vec below)
-			let messages_in_the_proof = nonces_difference.saturating_add(1);
-			if messages_in_the_proof != MessageNonce::from(messages_count) {
-				return Err(MessageProofError::MessagesCountMismatch);
-			}
+		let messages_in_the_proof =
+			if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) {
+				// let's check that the user (relayer) has passed correct `messages_count`
+				// (this bounds maximal capacity of messages vec below)
+				let messages_in_the_proof = nonces_difference.saturating_add(1);
+				if messages_in_the_proof != MessageNonce::from(messages_count) {
+					return Err(MessageProofError::MessagesCountMismatch)
+				}
 
-			messages_in_the_proof
-		} else {
-			0
-		};
+				messages_in_the_proof
+			} else {
+				0
+			};
 
 		let parser = build_parser(bridged_header_hash, storage_proof)?;
 
@@ -683,20 +738,15 @@ pub mod target {
 			let raw_message_data = parser
 				.read_raw_message(&message_key)
 				.ok_or(MessageProofError::MissingRequiredMessage)?;
-			let message_data = MessageData::<BalanceOf<BridgedChain<B>>>::decode(&mut &raw_message_data[..])
-				.map_err(|_| MessageProofError::FailedToDecodeMessage)?;
-			messages.push(Message {
-				key: message_key,
-				data: message_data,
-			});
+			let message_data =
+				MessageData::<BalanceOf<BridgedChain<B>>>::decode(&mut &raw_message_data[..])
+					.map_err(|_| MessageProofError::FailedToDecodeMessage)?;
+			messages.push(Message { key: message_key, data: message_data });
 		}
 
 		// Now let's check if proof contains outbound lane state proof. It is optional, so we
 		// simply ignore `read_value` errors and missing value.
-		let mut proved_lane_messages = ProvedLaneMessages {
-			lane_state: None,
-			messages,
-		};
+		let mut proved_lane_messages = ProvedLaneMessages { lane_state: None, messages };
 		let raw_outbound_lane_data = parser.read_raw_outbound_lane_data(&lane);
 		if let Some(raw_outbound_lane_data) = raw_outbound_lane_data {
 			proved_lane_messages.lane_state = Some(
@@ -707,7 +757,7 @@ pub mod target {
 
 		// Now we may actually check if the proof is empty or not.
 		if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() {
-			return Err(MessageProofError::Empty);
+			return Err(MessageProofError::Empty)
 		}
 
 		// We only support single lane messages in this schema
@@ -733,7 +783,8 @@ mod tests {
 	const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: Weight = 2048;
 	const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024;
 
-	/// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from BridgedChain;
+	/// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from
+	/// BridgedChain;
 	#[derive(Debug, PartialEq, Eq)]
 	struct OnThisChainBridge;
 
@@ -741,17 +792,20 @@ mod tests {
 		const RELAYER_FEE_PERCENT: u32 = 10;
 		const THIS_CHAIN_ID: ChainId = *b"this";
 		const BRIDGED_CHAIN_ID: ChainId = *b"brdg";
+		const BRIDGED_MESSAGES_PALLET_NAME: &'static str = "";
 
 		type ThisChain = ThisChain;
 		type BridgedChain = BridgedChain;
-		type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance;
 
-		fn bridged_balance_to_this_balance(bridged_balance: BridgedChainBalance) -> ThisChainBalance {
+		fn bridged_balance_to_this_balance(
+			bridged_balance: BridgedChainBalance,
+		) -> ThisChainBalance {
 			ThisChainBalance(bridged_balance.0 * BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE as u32)
 		}
 	}
 
-	/// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from ThisChain;
+	/// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from
+	/// ThisChain;
 	#[derive(Debug, PartialEq, Eq)]
 	struct OnBridgedChainBridge;
 
@@ -759,10 +813,10 @@ mod tests {
 		const RELAYER_FEE_PERCENT: u32 = 20;
 		const THIS_CHAIN_ID: ChainId = *b"brdg";
 		const BRIDGED_CHAIN_ID: ChainId = *b"this";
+		const BRIDGED_MESSAGES_PALLET_NAME: &'static str = "";
 
 		type ThisChain = BridgedChain;
 		type BridgedChain = ThisChain;
-		type BridgedMessagesInstance = pallet_bridge_messages::DefaultInstance;
 
 		fn bridged_balance_to_this_balance(_this_balance: ThisChainBalance) -> BridgedChainBalance {
 			unreachable!()
@@ -886,7 +940,9 @@ mod tests {
 		}
 
 		fn transaction_payment(transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
-			ThisChainBalance(transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32)
+			ThisChainBalance(
+				transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32,
+			)
 		}
 	}
 
@@ -907,7 +963,9 @@ mod tests {
 			unreachable!()
 		}
 
-		fn transaction_payment(_transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
+		fn transaction_payment(
+			_transaction: MessageTransaction<WeightOf<Self>>,
+		) -> BalanceOf<Self> {
 			unreachable!()
 		}
 	}
@@ -938,7 +996,9 @@ mod tests {
 			unreachable!()
 		}
 
-		fn transaction_payment(_transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
+		fn transaction_payment(
+			_transaction: MessageTransaction<WeightOf<Self>>,
+		) -> BalanceOf<Self> {
 			unreachable!()
 		}
 	}
@@ -949,7 +1009,8 @@ mod tests {
 		}
 
 		fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive<Self::Weight> {
-			let begin = std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight);
+			let begin =
+				std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight);
 			begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT
 		}
 
@@ -965,7 +1026,9 @@ mod tests {
 		}
 
 		fn transaction_payment(transaction: MessageTransaction<WeightOf<Self>>) -> BalanceOf<Self> {
-			BridgedChainBalance(transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32)
+			BridgedChainBalance(
+				transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32,
+			)
 		}
 	}
 
@@ -976,19 +1039,22 @@ mod tests {
 	#[test]
 	fn message_from_bridged_chain_is_decoded() {
 		// the message is encoded on the bridged chain
-		let message_on_bridged_chain = source::FromThisChainMessagePayload::<OnBridgedChainBridge> {
-			spec_version: 1,
-			weight: 100,
-			origin: bp_message_dispatch::CallOrigin::SourceRoot,
-			dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
-			call: ThisChainCall::Transfer.encode(),
-		}
-		.encode();
+		let message_on_bridged_chain =
+			source::FromThisChainMessagePayload::<OnBridgedChainBridge> {
+				spec_version: 1,
+				weight: 100,
+				origin: bp_message_dispatch::CallOrigin::SourceRoot,
+				dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
+				call: ThisChainCall::Transfer.encode(),
+			}
+			.encode();
 
 		// and sent to this chain where it is decoded
 		let message_on_this_chain =
-			target::FromBridgedChainMessagePayload::<OnThisChainBridge>::decode(&mut &message_on_bridged_chain[..])
-				.unwrap();
+			target::FromBridgedChainMessagePayload::<OnThisChainBridge>::decode(
+				&mut &message_on_bridged_chain[..],
+			)
+			.unwrap();
 		assert_eq!(
 			message_on_this_chain,
 			target::FromBridgedChainMessagePayload::<OnThisChainBridge> {
@@ -1007,7 +1073,8 @@ mod tests {
 	const TEST_LANE_ID: &LaneId = b"test";
 	const MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE: MessageNonce = 32;
 
-	fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload<OnThisChainBridge> {
+	fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload<OnThisChainBridge>
+	{
 		source::FromThisChainMessagePayload::<OnThisChainBridge> {
 			spec_version: 1,
 			weight: 100,
@@ -1036,11 +1103,14 @@ mod tests {
 		// let's check if estimation is less than hardcoded, if dispatch is paid at target chain
 		let mut payload_with_pay_on_target = regular_outbound_message_payload();
 		payload_with_pay_on_target.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
-		let fee_at_source = source::estimate_message_dispatch_and_delivery_fee::<OnThisChainBridge>(
-			&payload_with_pay_on_target,
-			OnThisChainBridge::RELAYER_FEE_PERCENT,
-		)
-		.expect("estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message");
+		let fee_at_source =
+			source::estimate_message_dispatch_and_delivery_fee::<OnThisChainBridge>(
+				&payload_with_pay_on_target,
+				OnThisChainBridge::RELAYER_FEE_PERCENT,
+			)
+			.expect(
+				"estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message",
+			);
 		assert!(
 			fee_at_source < EXPECTED_MINIMAL_FEE.into(),
 			"Computed fee {:?} without prepaid dispatch must be less than the fee with prepaid dispatch {}",
@@ -1059,16 +1129,14 @@ mod tests {
 			),
 			Err(source::TOO_LOW_FEE)
 		);
-		assert!(
-			source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
-				&Sender::Root,
-				&ThisChainBalance(1_000_000),
-				TEST_LANE_ID,
-				&test_lane_outbound_data(),
-				&payload,
-			)
-			.is_ok(),
-		);
+		assert!(source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
+			&Sender::Root,
+			&ThisChainBalance(1_000_000),
+			TEST_LANE_ID,
+			&test_lane_outbound_data(),
+			&payload,
+		)
+		.is_ok(),);
 	}
 
 	#[test]
@@ -1103,16 +1171,14 @@ mod tests {
 			),
 			Err(source::BAD_ORIGIN)
 		);
-		assert!(
-			source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
-				&Sender::Root,
-				&ThisChainBalance(1_000_000),
-				TEST_LANE_ID,
-				&test_lane_outbound_data(),
-				&payload,
-			)
-			.is_ok(),
-		);
+		assert!(source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
+			&Sender::Root,
+			&ThisChainBalance(1_000_000),
+			TEST_LANE_ID,
+			&test_lane_outbound_data(),
+			&payload,
+		)
+		.is_ok(),);
 	}
 
 	#[test]
@@ -1137,16 +1203,14 @@ mod tests {
 			),
 			Err(source::BAD_ORIGIN)
 		);
-		assert!(
-			source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
-				&Sender::Signed(ThisChainAccountId(1)),
-				&ThisChainBalance(1_000_000),
-				TEST_LANE_ID,
-				&test_lane_outbound_data(),
-				&payload,
-			)
-			.is_ok(),
-		);
+		assert!(source::FromThisChainMessageVerifier::<OnThisChainBridge>::verify_message(
+			&Sender::Signed(ThisChainAccountId(1)),
+			&ThisChainBalance(1_000_000),
+			TEST_LANE_ID,
+			&test_lane_outbound_data(),
+			&payload,
+		)
+		.is_ok(),);
 	}
 
 	#[test]
@@ -1183,64 +1247,58 @@ mod tests {
 
 	#[test]
 	fn verify_chain_message_rejects_message_with_too_small_declared_weight() {
-		assert!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
+		assert!(source::verify_chain_message::<OnThisChainBridge>(
+			&source::FromThisChainMessagePayload::<OnThisChainBridge> {
 				spec_version: 1,
 				weight: 5,
 				origin: bp_message_dispatch::CallOrigin::SourceRoot,
 				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 				call: vec![1, 2, 3, 4, 5, 6],
-			},)
-			.is_err()
-		);
+			},
+		)
+		.is_err());
 	}
 
 	#[test]
 	fn verify_chain_message_rejects_message_with_too_large_declared_weight() {
-		assert!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
+		assert!(source::verify_chain_message::<OnThisChainBridge>(
+			&source::FromThisChainMessagePayload::<OnThisChainBridge> {
 				spec_version: 1,
 				weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1,
 				origin: bp_message_dispatch::CallOrigin::SourceRoot,
 				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 				call: vec![1, 2, 3, 4, 5, 6],
-			},)
-			.is_err()
-		);
+			},
+		)
+		.is_err());
 	}
 
 	#[test]
 	fn verify_chain_message_rejects_message_too_large_message() {
-		assert!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
+		assert!(source::verify_chain_message::<OnThisChainBridge>(
+			&source::FromThisChainMessagePayload::<OnThisChainBridge> {
 				spec_version: 1,
 				weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
 				origin: bp_message_dispatch::CallOrigin::SourceRoot,
 				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 				call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as usize + 1],
-			},)
-			.is_err()
-		);
+			},
+		)
+		.is_err());
 	}
 
 	#[test]
 	fn verify_chain_message_accepts_maximal_message() {
 		assert_eq!(
-			source::verify_chain_message::<OnThisChainBridge>(&source::FromThisChainMessagePayload::<
-				OnThisChainBridge,
-			> {
-				spec_version: 1,
-				weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
-				origin: bp_message_dispatch::CallOrigin::SourceRoot,
-				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
-				call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as _],
-			},),
+			source::verify_chain_message::<OnThisChainBridge>(
+				&source::FromThisChainMessagePayload::<OnThisChainBridge> {
+					spec_version: 1,
+					weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT,
+					origin: bp_message_dispatch::CallOrigin::SourceRoot,
+					dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
+					call: vec![0; source::maximal_message_size::<OnThisChainBridge>() as _],
+				},
+			),
 			Ok(()),
 		);
 	}
@@ -1332,13 +1390,15 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_required_message_is_missing() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(10), 10, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(10),
+				10,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: 1..=5,
 					outbound_lane_data: None,
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::MissingRequiredMessage),
 		);
 	}
@@ -1346,13 +1406,15 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_message_decode_fails() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(10), 10, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(10),
+				10,
+				|_, _| Ok(TestMessageProofParser {
 					failing: true,
 					messages: 1..=10,
 					outbound_lane_data: None,
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::FailedToDecodeMessage),
 		);
 	}
@@ -1360,8 +1422,10 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(0), 0, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(0),
+				0,
+				|_, _| Ok(TestMessageProofParser {
 					failing: true,
 					messages: no_messages_range(),
 					outbound_lane_data: Some(OutboundLaneData {
@@ -1369,8 +1433,8 @@ mod tests {
 						latest_received_nonce: 1,
 						latest_generated_nonce: 1,
 					}),
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::FailedToDecodeOutboundLaneState),
 		);
 	}
@@ -1378,13 +1442,15 @@ mod tests {
 	#[test]
 	fn message_proof_is_rejected_if_it_is_empty() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(0), 0, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(0),
+				0,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: no_messages_range(),
 					outbound_lane_data: None,
-				}
-			),),
+				}),
+			),
 			Err(target::MessageProofError::Empty),
 		);
 	}
@@ -1392,8 +1458,10 @@ mod tests {
 	#[test]
 	fn non_empty_message_proof_without_messages_is_accepted() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(0), 0, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(0),
+				0,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: no_messages_range(),
 					outbound_lane_data: Some(OutboundLaneData {
@@ -1401,8 +1469,8 @@ mod tests {
 						latest_received_nonce: 1,
 						latest_generated_nonce: 1,
 					}),
-				}
-			),),
+				}),
+			),
 			Ok(vec![(
 				Default::default(),
 				ProvedLaneMessages {
@@ -1422,8 +1490,10 @@ mod tests {
 	#[test]
 	fn non_empty_message_proof_is_accepted() {
 		assert_eq!(
-			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(messages_proof(1), 1, |_, _| Ok(
-				TestMessageProofParser {
+			target::verify_messages_proof_with_parser::<OnThisChainBridge, _, _>(
+				messages_proof(1),
+				1,
+				|_, _| Ok(TestMessageProofParser {
 					failing: false,
 					messages: 1..=1,
 					outbound_lane_data: Some(OutboundLaneData {
@@ -1431,8 +1501,8 @@ mod tests {
 						latest_received_nonce: 1,
 						latest_generated_nonce: 1,
 					}),
-				}
-			),),
+				}),
+			),
 			Ok(vec![(
 				Default::default(),
 				ProvedLaneMessages {
@@ -1442,14 +1512,8 @@ mod tests {
 						latest_generated_nonce: 1,
 					}),
 					messages: vec![Message {
-						key: MessageKey {
-							lane_id: Default::default(),
-							nonce: 1
-						},
-						data: MessageData {
-							payload: 1u64.encode(),
-							fee: BridgedChainBalance(0)
-						},
+						key: MessageKey { lane_id: Default::default(), nonce: 1 },
+						data: MessageData { payload: 1u64.encode(), fee: BridgedChainBalance(0) },
 					}],
 				},
 			)]
@@ -1488,10 +1552,7 @@ mod tests {
 				10,
 				FixedU128::zero(),
 				|weight| weight,
-				MessageTransaction {
-					size: 50,
-					dispatch_weight: 777
-				},
+				MessageTransaction { size: 50, dispatch_weight: 777 },
 			),
 			100 + 50 * 10,
 		);
@@ -1507,10 +1568,7 @@ mod tests {
 				10,
 				FixedU128::one(),
 				|weight| weight,
-				MessageTransaction {
-					size: 50,
-					dispatch_weight: 777
-				},
+				MessageTransaction { size: 50, dispatch_weight: 777 },
 			),
 			100 + 50 * 10 + 777,
 		);
diff --git a/polkadot/bridges/bin/runtime-common/src/messages_api.rs b/polkadot/bridges/bin/runtime-common/src/messages_api.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b09a88e62795982c64ffc99a07debff08e9eb24c
--- /dev/null
+++ b/polkadot/bridges/bin/runtime-common/src/messages_api.rs
@@ -0,0 +1,51 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Helpers for implementing various message-related runtime API methods.
+
+use crate::messages::{source::FromThisChainMessagePayload, MessageBridge};
+
+use bp_messages::{LaneId, MessageDetails, MessageNonce};
+use codec::Decode;
+use sp_std::vec::Vec;
+
+/// Implementation of the `To*OutboundLaneApi::message_details`.
+pub fn outbound_message_details<Runtime, MessagesPalletInstance, BridgeConfig>(
+	lane: LaneId,
+	begin: MessageNonce,
+	end: MessageNonce,
+) -> Vec<MessageDetails<Runtime::OutboundMessageFee>>
+where
+	Runtime: pallet_bridge_messages::Config<MessagesPalletInstance>,
+	MessagesPalletInstance: 'static,
+	BridgeConfig: MessageBridge,
+{
+	(begin..=end)
+		.filter_map(|nonce| {
+			let message_data =
+				pallet_bridge_messages::Pallet::<Runtime, MessagesPalletInstance>::outbound_message_data(lane, nonce)?;
+			let decoded_payload =
+				FromThisChainMessagePayload::<BridgeConfig>::decode(&mut &message_data.payload[..]).ok()?;
+			Some(MessageDetails {
+				nonce,
+				dispatch_weight: decoded_payload.weight,
+				size: message_data.payload.len() as _,
+				delivery_and_dispatch_fee: message_data.fee,
+				dispatch_fee_payment: decoded_payload.dispatch_fee_payment,
+			})
+		})
+		.collect()
+}
diff --git a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs
index 3785f4a4607f0af545eca7583b86d3219c76541f..217560e114344c61e502d888d92f130e09732db2 100644
--- a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs
+++ b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs
@@ -20,8 +20,8 @@
 #![cfg(feature = "runtime-benchmarks")]
 
 use crate::messages::{
-	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, AccountIdOf, BalanceOf,
-	BridgedChain, HashOf, MessageBridge, ThisChain,
+	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof,
+	AccountIdOf, BalanceOf, BridgedChain, HashOf, MessageBridge, ThisChain,
 };
 
 use bp_messages::{LaneId, MessageData, MessageKey, MessagePayload};
@@ -29,13 +29,16 @@ use bp_runtime::ChainId;
 use codec::Encode;
 use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH};
 use frame_support::weights::Weight;
-use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams, ProofSize};
+use pallet_bridge_messages::benchmarking::{
+	MessageDeliveryProofParams, MessageProofParams, ProofSize,
+};
 use sp_core::Hasher;
 use sp_runtime::traits::Header;
 use sp_std::prelude::*;
 use sp_trie::{record_all_keys, trie_types::TrieDBMut, Layout, MemoryDB, Recorder, TrieMut};
 
-/// Generate ed25519 signature to be used in `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`.
+/// Generate ed25519 signature to be used in
+/// `pallet_bridge_call_dispatch::CallOrigin::TargetAccount`.
 ///
 /// Returns public key of the signer and the signature itself.
 pub fn ed25519_sign(
@@ -47,8 +50,8 @@ pub fn ed25519_sign(
 ) -> ([u8; 32], [u8; 64]) {
 	// key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html)
 	let target_secret = SecretKey::from_bytes(&[
-		157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, 197, 105, 123, 050,
-		105, 025, 112, 059, 172, 003, 028, 174, 127, 096,
+		157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073,
+		197, 105, 123, 050, 105, 025, 112, 059, 172, 003, 028, 174, 127, 096,
 	])
 	.expect("harcoded key is valid");
 	let target_public: PublicKey = (&target_secret).into();
@@ -56,7 +59,8 @@ pub fn ed25519_sign(
 	let mut target_pair_bytes = [0u8; KEYPAIR_LENGTH];
 	target_pair_bytes[..SECRET_KEY_LENGTH].copy_from_slice(&target_secret.to_bytes());
 	target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes());
-	let target_pair = ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid");
+	let target_pair =
+		ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid");
 
 	let signature_message = pallet_bridge_dispatch::account_ownership_digest(
 		target_call,
@@ -92,11 +96,8 @@ where
 	MH: Fn(H::Out) -> <R::BridgedChain as bp_runtime::Chain>::Header,
 {
 	// prepare Bridged chain storage with messages and (optionally) outbound lane state
-	let message_count = params
-		.message_nonces
-		.end()
-		.saturating_sub(*params.message_nonces.start())
-		+ 1;
+	let message_count =
+		params.message_nonces.end().saturating_sub(*params.message_nonces.start()) + 1;
 	let mut storage_keys = Vec::with_capacity(message_count as usize + 1);
 	let mut root = Default::default();
 	let mut mdb = MemoryDB::default();
@@ -105,10 +106,7 @@ where
 
 		// insert messages
 		for nonce in params.message_nonces.clone() {
-			let message_key = MessageKey {
-				lane_id: params.lane,
-				nonce,
-			};
+			let message_key = MessageKey { lane_id: params.lane, nonce };
 			let message_data = MessageData {
 				fee: BalanceOf::<BridgedChain<B>>::from(0),
 				payload: message_payload.clone(),
@@ -220,7 +218,7 @@ fn grow_trie<H: Hasher>(mut root: H::Out, mdb: &mut MemoryDB<H>, trie_size: Proo
 			.expect("record_all_keys should not fail in benchmarks");
 		let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum();
 		if size > minimal_trie_size as _ {
-			return root;
+			return root
 		}
 
 		let mut trie = TrieDBMut::<H>::from_existing(mdb, &mut root)
diff --git a/polkadot/bridges/deny.toml b/polkadot/bridges/deny.toml
index e754b8e9bd36286f3c1650d094655bf8df27b36e..d22897182af29127f2ee0994549dd1b70af2fc04 100644
--- a/polkadot/bridges/deny.toml
+++ b/polkadot/bridges/deny.toml
@@ -48,27 +48,21 @@ notice = "warn"
 # A list of advisory IDs to ignore. Note that ignored advisories will still
 # output a note when they are encountered.
 ignore = [
-	# generic-array lifetime errasure. If all upstream crates upgrade to >=0.14.0
-	# we can remove this.
-	"RUSTSEC-2020-0146",
 	# yaml-rust < clap. Not feasible to upgrade and also not possible to trigger in practice.
 	"RUSTSEC-2018-0006",
-    # Comes from wasmtime via Substrate: 'cranelift-codegen'
-    "RUSTSEC-2021-0067",
-    # Comes from libp2p via Substrate: 'aes-soft', 'aesni', 'block-cipher', 'stream-cipher'
-    "RUSTSEC-2021-0060",
-    "RUSTSEC-2021-0059",
-    "RUSTSEC-2020-0057",
-    "RUSTSEC-2021-0064",
-    # Comes from jsonrpc via Substrate: 'failure', 'net2', 'lock_api'
-    "RUSTSEC-2020-0036",
-    "RUSTSEC-2020-0077",
-    "RUSTSEC-2019-0036",
     "RUSTSEC-2020-0070",
     # Comes from honggfuzz via storage-proof-fuzzer: 'memmap'
     "RUSTSEC-2020-0077",
     # Comes from time: 'stweb' (will be fixed in upcoming time 0.3)
-    "RUSTSEC-2020-0056"
+    "RUSTSEC-2020-0056",
+    # net2 (origin: Substrate RPC crates)
+    "RUSTSEC-2020-0016",
+    # Wasmtime (origin: Substrate executor crates)
+    "RUSTSEC-2021-0110",
+    # time (origin: Substrate RPC + benchmarking crates)
+    "RUSTSEC-2020-0071",
+    # chrono (origin: Substrate benchmarking + cli + ...)
+    "RUSTSEC-2020-0159",
 ]
 # Threshold for security vulnerabilities, any vulnerability with a CVSS score
 # lower than the range specified will be ignored. Note that ignored advisories
@@ -85,7 +79,7 @@ ignore = [
 # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
 [licenses]
 # The lint level for crates which do not have a detectable license
-unlicensed = "deny"
+unlicensed = "allow"
 # List of explictly allowed licenses
 # See https://spdx.org/licenses/ for list of possible licenses
 # [possible values: any SPDX 3.7 short identifier (+ optional exception)].
diff --git a/polkadot/bridges/deployments/README.md b/polkadot/bridges/deployments/README.md
index d553fca611a61b8a8986bb091605bd7b30c0da25..f9207e6d683e9ac60990223d71236870da5a7f09 100644
--- a/polkadot/bridges/deployments/README.md
+++ b/polkadot/bridges/deployments/README.md
@@ -93,7 +93,7 @@ seeds for the `sr25519` keys. This seed may also be used in the signer argument
 and PoA relays. Example:
 
 ```bash
-./substrate-relay relay-headers RialtoToMillau \
+./substrate-relay relay-headers rialto-to-millau \
 	--source-host rialto-node-alice \
 	--source-port 9944 \
 	--target-host millau-node-alice \
@@ -121,7 +121,9 @@ Following accounts are used when `rialto-millau` bridge is running:
 - Millau's `Eve` signs relay transactions with message delivery confirmations (lane 00000001) from Rialto to Millau;
 - Rialto's `Eve` signs relay transactions with messages (lane 00000001) from Millau to Rialto;
 - Millau's `Ferdie` signs relay transactions with messages (lane 00000001) from Rialto to Millau;
-- Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto.
+- Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto;
+- Millau's `RialtoMessagesOwner` signs relay transactions with updated Rialto -> Millau conversion rate;
+- Rialto's `MillauMessagesOwner` signs relay transactions with updated Millau -> Rialto conversion rate.
 
 Following accounts are used when `westend-millau` bridge is running:
 
diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh
index 432cdd6b72c5abb01fba0c3fc76260bf51cf341c..b49362c03cddfe54852580b799e28e87dca3fbd3 100755
--- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-poa-to-rialto-entrypoint.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 curl -v http://poa-node-arthur:8545/api/health
 curl -v http://poa-node-bertha:8545/api/health
 curl -v http://poa-node-carlos:8545/api/health
diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh
index 1677cc1accde5788bb0c8fb6c38551ee5307bc42..8fbf9ac0d97cc7e7dffa09fd61c07f1cbb6fdee6 100755
--- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-headers-rialto-to-poa-entrypoint.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 
 curl -v http://rialto-node-bob:9933/health
 curl -v http://poa-node-bertha:8545/api/health
diff --git a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh
index 131a31ffbea9590b0570a74870f1545e74aadbf9..54d6baeebb0666dd83ffe9fb010886897bafce0b 100755
--- a/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/poa-rialto/entrypoints/relay-poa-exchange-rialto-entrypoint.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 curl -v http://poa-node-arthur:8545/api/health
 curl -v http://poa-node-bertha:8545/api/health
 curl -v http://poa-node-carlos:8545/api/health
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json
index 69396162bbaa8d436c303fa334330f809d5a4b0a..6fd0fb4ebc7d22882c5eafaf6bc26f821f17e70a 100644
--- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json
@@ -471,7 +471,7 @@
           }
         ],
         "executionErrorState": "alerting",
-        "for": "5m",
+        "for": "7m",
         "frequency": "1m",
         "handler": 1,
         "name": "Messages from Millau to Rialto are not being delivered",
@@ -896,7 +896,7 @@
           }
         ],
         "executionErrorState": "alerting",
-        "for": "5m",
+        "for": "7m",
         "frequency": "1m",
         "handler": 1,
         "name": "Messages (00000001) from Millau to Rialto are not being delivered",
@@ -967,8 +967,7 @@
           "fill": true,
           "line": true,
           "op": "lt",
-          "value": 1,
-          "yaxis": "left"
+          "value": 1
         }
       ],
       "timeFrom": null,
@@ -1155,6 +1154,249 @@
         "alignLevel": null
       }
     },
+    {
+      "alert": {
+        "alertRuleTags": {},
+        "conditions": [
+          {
+            "evaluator": {
+              "params": [
+                1
+              ],
+              "type": "lt"
+            },
+            "operator": {
+              "type": "and"
+            },
+            "query": {
+              "params": [
+                "B",
+                "1m",
+                "now"
+              ]
+            },
+            "reducer": {
+              "params": [],
+              "type": "max"
+            },
+            "type": "query"
+          }
+        ],
+        "executionErrorState": "alerting",
+        "for": "5m",
+        "frequency": "1m",
+        "handler": 1,
+        "name": "Token swap messages from Millau to Rialto are not being delivered",
+        "noDataState": "no_data",
+        "notifications": []
+      },
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "Prometheus",
+      "fieldConfig": {
+        "defaults": {
+          "custom": {},
+          "links": []
+        },
+        "overrides": []
+      },
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 11,
+        "w": 12,
+        "x": 0,
+        "y": 38
+      },
+      "hiddenSeries": false,
+      "id": 23,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "nullPointMode": "null",
+      "percentage": false,
+      "pluginVersion": "7.1.3",
+      "pointradius": 2,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")",
+          "interval": "",
+          "legendFormat": "{{type}}",
+          "refId": "A"
+        },
+        {
+          "expr": "increase(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=\"target_latest_received\"}[20m])",
+          "hide": true,
+          "interval": "",
+          "legendFormat": "Messages generated in last 5 minutes",
+          "refId": "B"
+        }
+      ],
+      "thresholds": [
+        {
+          "colorMode": "critical",
+          "fill": true,
+          "line": true,
+          "op": "lt",
+          "value": 1
+        }
+      ],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Delivery race (73776170)",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "Prometheus",
+      "fieldConfig": {
+        "defaults": {
+          "custom": {},
+          "links": []
+        },
+        "overrides": []
+      },
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 11,
+        "w": 12,
+        "x": 12,
+        "y": 38
+      },
+      "hiddenSeries": false,
+      "id": 24,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "nullPointMode": "null",
+      "percentage": false,
+      "pluginVersion": "7.1.3",
+      "pointradius": 2,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")",
+          "interval": "",
+          "legendFormat": "{{type}}",
+          "refId": "A"
+        },
+        {
+          "expr": "increase(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=\"source_latest_confirmed\"}[10m])",
+          "hide": true,
+          "interval": "",
+          "legendFormat": "",
+          "refId": "B"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Confirmations race (73776170)",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
     {
       "datasource": "Prometheus",
       "fieldConfig": {
@@ -1181,7 +1423,7 @@
         "h": 8,
         "w": 8,
         "x": 0,
-        "y": 38
+        "y": 49
       },
       "id": 16,
       "options": {
@@ -1230,7 +1472,7 @@
         "h": 8,
         "w": 8,
         "x": 8,
-        "y": 38
+        "y": 49
       },
       "hiddenSeries": false,
       "id": 18,
@@ -1323,7 +1565,7 @@
         "h": 8,
         "w": 8,
         "x": 16,
-        "y": 38
+        "y": 49
       },
       "hiddenSeries": false,
       "id": 20,
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json
index 29691e0a060c695deeb99b786ac7e33924cac478..33725dc60961592e2d98eebadb02ee250b70938d 100644
--- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json
@@ -462,7 +462,7 @@
           }
         ],
         "executionErrorState": "alerting",
-        "for": "5m",
+        "for": "7m",
         "frequency": "1m",
         "handler": 1,
         "name": "Messages from Rialto to Millau are not being delivered",
@@ -887,7 +887,7 @@
           }
         ],
         "executionErrorState": "alerting",
-        "for": "5m",
+        "for": "7m",
         "frequency": "1m",
         "handler": 1,
         "name": "Messages (00000001) from Rialto to Millau are not being delivered",
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml
index 5f00e449c3b0bdbe491d291daf0c76f8f11d1ecf..1ff93869de1cb46f3af31f93bbe606f77d6ba0a1 100644
--- a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml
@@ -1,4 +1,4 @@
-# Exposed ports: 10016, 10116, 10216, 10316, 10416
+# Exposed ports: 10016, 10116, 10216, 10316, 10416, 10516, 10716
 
 version: '3.5'
 services:
@@ -52,6 +52,7 @@ services:
   relay-messages-millau-to-rialto-generator:
     <<: *sub-bridge-relay
     environment:
+      RUST_LOG: bridge=trace
       MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001"
     entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh
     ports:
@@ -59,13 +60,24 @@ services:
     depends_on:
       - relay-millau-rialto
 
+  relay-messages-millau-to-rialto-resubmitter:
+    <<: *sub-bridge-relay
+    environment:
+      RUST_LOG: bridge=trace
+    entrypoint: /entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh
+    ports:
+      - "10316:9616"
+    depends_on:
+      - relay-messages-millau-to-rialto-generator
+
   relay-messages-rialto-to-millau-lane-00000001:
     <<: *sub-bridge-relay
     environment:
+      RUST_LOG: bridge=trace
       MSG_EXCHANGE_GEN_LANE: "00000001"
     entrypoint: /entrypoints/relay-messages-rialto-to-millau-entrypoint.sh
     ports:
-      - "10316:9616"
+      - "10416:9616"
     depends_on:
       - relay-millau-rialto
 
@@ -75,7 +87,15 @@ services:
       MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001"
     entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh
     ports:
-      - "10416:9616"
+      - "10516:9616"
+    depends_on:
+      - relay-millau-rialto
+
+  relay-token-swap-generator:
+    <<: *sub-bridge-relay
+    entrypoint: /entrypoints/relay-token-swap-generator-entrypoint.sh
+    ports:
+      - "10716:9616"
     depends_on:
       - relay-millau-rialto
 
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh
index 26be814b6941fad513461bf18104d57d8d566ad2..758dce2515aa8586dcddbd2738259f08fb2b4fd6 100755
--- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh
@@ -1,13 +1,13 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 curl -v http://millau-node-bob:9933/health
 curl -v http://rialto-node-bob:9933/health
 
 MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000}
 
-/home/user/substrate-relay relay-messages MillauToRialto \
+/home/user/substrate-relay relay-messages millau-to-rialto \
 	--lane $MESSAGE_LANE \
 	--source-host millau-node-bob \
 	--source-port 9944 \
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh
index 04bde07ad971b39c1dd495f7015586f303ebb2f8..e0731e9058d1dea3af074a93b34d105db36443f5 100755
--- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh
@@ -1,13 +1,13 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 curl -v http://millau-node-bob:9933/health
 curl -v http://rialto-node-bob:9933/health
 
 MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000}
 
-/home/user/substrate-relay relay-messages RialtoToMillau \
+/home/user/substrate-relay relay-messages rialto-to-millau \
 	--lane $MESSAGE_LANE \
 	--source-host rialto-node-bob \
 	--source-port 9944 \
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh
index 96676bad85b03ccd5f6c7233d799b4dfa9255447..b8d051a13122bf0388e1ab5ec37dbc774a26e676 100755
--- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh
@@ -14,7 +14,7 @@ SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE}
 MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=1024
 FERDIE_ADDR=5oSLwptwgySxh5vz1HdvznQJjbQVgwYSvHEpYYeTXu1Ei8j7
 
-SHARED_CMD="/home/user/substrate-relay send-message RialtoToMillau"
+SHARED_CMD="/home/user/substrate-relay send-message rialto-to-millau"
 SHARED_HOST="--source-host rialto-node-bob --source-port 9944"
 DAVE_SIGNER="--source-signer //Dave --target-signer //Dave"
 
@@ -25,6 +25,8 @@ rand_sleep() {
 	SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1`
 	echo "Sleeping $SUBMIT_DELAY_S seconds..."
 	sleep $SUBMIT_DELAY_S
+	NOW=`date "+%Y-%m-%d %H:%M:%S"`
+	echo "Woke up at $NOW"
 }
 
 # start sending large messages immediately
@@ -32,6 +34,10 @@ LARGE_MESSAGES_TIME=0
 # start sending message packs in a hour
 BUNCH_OF_MESSAGES_TIME=3600
 
+# give conversion rate updater some time to update Millau->Rialto conversion rate in Rialto
+# (initially rate=1 and a rational relayer won't deliver any messages if it is changed to a larger value)
+sleep 180
+
 while true
 do
 	rand_sleep
@@ -46,6 +52,7 @@ do
 		$SEND_MESSAGE \
 			--lane $SECONDARY_MESSAGE_LANE \
 			--origin Target \
+			--dispatch-fee-payment at-target-chain \
 			remark
 	fi
 
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh
index c24ec8ea7f40a7e8825b9419b5ee61964425ba8a..0365ebe1d8b46b8d79ce4ce01d01ff4bfc049cc7 100755
--- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh
@@ -14,7 +14,7 @@ SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE}
 MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=128
 FERDIE_ADDR=6ztG3jPnJTwgZnnYsgCDXbbQVR82M96hBZtPvkN56A9668ZC
 
-SHARED_CMD=" /home/user/substrate-relay send-message MillauToRialto"
+SHARED_CMD=" /home/user/substrate-relay send-message millau-to-rialto"
 SHARED_HOST="--source-host millau-node-bob --source-port 9944"
 DAVE_SIGNER="--target-signer //Dave --source-signer //Dave"
 
@@ -25,6 +25,8 @@ rand_sleep() {
 	SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1`
 	echo "Sleeping $SUBMIT_DELAY_S seconds..."
 	sleep $SUBMIT_DELAY_S
+	NOW=`date "+%Y-%m-%d %H:%M:%S"`
+	echo "Woke up at $NOW"
 }
 
 # start sending large messages immediately
@@ -32,6 +34,10 @@ LARGE_MESSAGES_TIME=0
 # start sending message packs in a hour
 BUNCH_OF_MESSAGES_TIME=3600
 
+# give conversion rate updater some time to update Rialto->Millau conversion rate in Millau
+# (initially rate=1 and a rational relayer won't deliver any messages if it is changed to a larger value)
+sleep 180
+
 while true
 do
 	rand_sleep
@@ -46,6 +52,7 @@ do
 		$SEND_MESSAGE \
 			--lane $SECONDARY_MESSAGE_LANE \
 			--origin Target \
+			--dispatch-fee-payment at-target-chain \
 			remark
 	fi
 
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..ca4c9f03a8bb80672ea3d37684c63c039ddbbf10
--- /dev/null
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+set -xeu
+
+sleep 20
+curl -v http://millau-node-alice:9933/health
+
+# //Dave is signing Millau -> Rialto message-send transactions, which are causing problems.
+#
+# When large message is being sent from Millau to Rialto AND other transactions are
+# blocking it from being mined, we'll see something like this in logs:
+#
+# Millau transaction priority with tip=0: 17800827994. Target priority:
+# 526186677695
+#
+# So since fee multiplier in Millau is `1` and `WeightToFee` is `IdentityFee`, then
+# we need tip around `526186677695 - 17800827994 = 508_385_849_701`. Let's round it
+# up to `1_000_000_000_000`.
+
+/home/user/substrate-relay resubmit-transactions millau \
+	--target-host millau-node-alice \
+	--target-port 9944 \
+	--target-signer //Dave \
+	--stalled-blocks 5 \
+	--tip-limit 1000000000000 \
+	--tip-step 1000000000 \
+	make-it-best-transaction
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh
index 4b50ac086a882b25be4920a10d49163cd5a12852..c87591fb6dbb75d6f8b2c26ed30e2c2d55f184f6 100755
--- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh
@@ -1,18 +1,18 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 curl -v http://millau-node-alice:9933/health
 curl -v http://rialto-node-alice:9933/health
 
-/home/user/substrate-relay init-bridge MillauToRialto \
+/home/user/substrate-relay init-bridge millau-to-rialto \
 	--source-host millau-node-alice \
 	--source-port 9944 \
 	--target-host rialto-node-alice \
 	--target-port 9944 \
 	--target-signer //Alice
 
-/home/user/substrate-relay init-bridge RialtoToMillau \
+/home/user/substrate-relay init-bridge rialto-to-millau \
 	--source-host rialto-node-alice \
 	--source-port 9944 \
 	--target-host millau-node-alice \
@@ -26,8 +26,11 @@ sleep 6
 	--millau-host millau-node-alice \
 	--millau-port 9944 \
 	--millau-signer //Charlie \
+	--millau-messages-pallet-owner=//RialtoMessagesOwner \
 	--rialto-host rialto-node-alice \
 	--rialto-port 9944 \
 	--rialto-signer //Charlie \
+	--rialto-messages-pallet-owner=//MillauMessagesOwner \
 	--lane=00000000 \
+	--lane=73776170 \
 	--prometheus-host=0.0.0.0
diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..95bbe1e38fb295d80a83c30279c3c144ebd8a38c
--- /dev/null
+++ b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT
+#
+# This script periodically calls the Substrate relay binary to initiate token swaps
+# between the Millau network and the Rialto network.
+
+set -eu
+
+# Max delay before submitting transactions (s)
+MAX_SUBMIT_DELAY_S=60
+SOURCE_HOST=millau-node-charlie
+SOURCE_PORT=9944
+TARGET_HOST=rialto-node-charlie
+TARGET_PORT=9944
+
+# Sleep a bit between messages
+rand_sleep() {
+	SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1`
+	echo "Sleeping $SUBMIT_DELAY_S seconds..."
+	sleep $SUBMIT_DELAY_S
+	NOW=`date "+%Y-%m-%d %H:%M:%S"`
+	echo "Woke up at $NOW"
+}
+
+# give conversion rate updater some time to update Rialto->Millau conversion rate in Millau
+# (initially rate=1 and a rational relayer won't deliver any messages if it is changed to a larger value)
+sleep 180
+
+while true
+do
+	rand_sleep
+	echo "Initiating token-swap between Rialto and Millau"
+	/home/user/substrate-relay \
+		swap-tokens \
+		millau-to-rialto \
+		--source-host $SOURCE_HOST \
+		--source-port $SOURCE_PORT \
+		--source-signer //WithRialtoTokenSwap \
+		--source-balance 100000 \
+		--target-host $TARGET_HOST \
+		--target-port $TARGET_PORT \
+		--target-signer //WithMillauTokenSwap \
+		--target-balance 200000 \
+		lock-until-block \
+		--blocks-before-expire 32
+done
diff --git a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json
index e73ddea40f1a6bc079b860174977f9f0c47de15e..6003604fa5310bd6b3cf8b6f20556ca1ae3431cc 100644
--- a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json
+++ b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json
@@ -24,7 +24,7 @@
           {
             "evaluator": {
               "params": [
-                5
+                32
               ],
               "type": "gt"
             },
@@ -46,11 +46,11 @@
           }
         ],
         "executionErrorState": "alerting",
-        "for": "5m",
+        "for": "60m",
         "frequency": "5m",
         "handler": 1,
         "message": "",
-        "name": "Synced Header Difference is Over 5 (Westend to Millau)",
+        "name": "Synced Header Difference is Over 32 (Westend to Millau)",
         "noDataState": "no_data",
         "notifications": []
       },
@@ -163,7 +163,7 @@
           {
             "evaluator": {
               "params": [
-                5
+                32
               ],
               "type": "lt"
             },
@@ -185,7 +185,7 @@
           }
         ],
         "executionErrorState": "alerting",
-        "for": "3m",
+        "for": "60m",
         "frequency": "5m",
         "handler": 1,
         "name": "No New Headers (Westend to Millau)",
@@ -237,9 +237,9 @@
       "steppedLine": false,
       "targets": [
         {
-          "expr": "max_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])-min_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[2m])",
+          "expr": "max_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[10m])-min_over_time(Westend_to_Millau_Sync_best_block_numbers{node=\"source\"}[10m])",
           "interval": "",
-          "legendFormat": "Number of new Headers on Westend (Last 2 Mins)",
+          "legendFormat": "Number of new Headers on Westend (Last 10 Mins)",
           "refId": "A"
         }
       ],
diff --git a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh
index 4a96ade6ec85302a936e493791fcb2f4213c7574..d3b6932983fba343392ce35568ace2885b6a498c 100755
--- a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh
+++ b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh
@@ -1,11 +1,11 @@
 #!/bin/bash
 set -xeu
 
-sleep 20
+sleep 60
 curl -v http://millau-node-alice:9933/health
 curl -v https://westend-rpc.polkadot.io:443/health
 
-/home/user/substrate-relay init-bridge WestendToMillau \
+/home/user/substrate-relay init-bridge westend-to-millau \
 	--source-host westend-rpc.polkadot.io \
 	--source-port 443 \
 	--source-secure \
@@ -15,11 +15,12 @@ curl -v https://westend-rpc.polkadot.io:443/health
 
 # Give chain a little bit of time to process initialization transaction
 sleep 6
-/home/user/substrate-relay relay-headers WestendToMillau \
+/home/user/substrate-relay relay-headers westend-to-millau \
 	--source-host westend-rpc.polkadot.io \
 	--source-port 443 \
 	--source-secure \
 	--target-host millau-node-alice \
 	--target-port 9944 \
 	--target-signer //George \
+	--target-transactions-mortality=4 \
 	--prometheus-host=0.0.0.0
diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh
index 2736243c5a48fab7cdc2c2c5bac963e1dbdac4a3..61028e1756b3cad0f46f59f8523c2b806e7e7e13 100755
--- a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh
+++ b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh
@@ -8,14 +8,14 @@
 
 set -xeu
 
-RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge RococoToWococo \
+RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge rococo-to-wococo \
 	--source-host 127.0.0.1 \
 	--source-port 9955 \
 	--target-host 127.0.0.1 \
 	--target-port 9944 \
 	--target-signer //Alice
 
-RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers RococoToWococo \
+RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers rococo-to-wococo \
 	--source-host 127.0.0.1 \
 	--source-port 9955 \
 	--target-host 127.0.0.1 \
diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh
index b3a7e383d9b950c0d18e378f87e20a570df86950..c57db2086fb412338efd7af1efd27c229d15f175 100755
--- a/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh
+++ b/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh
@@ -8,14 +8,14 @@
 
 set -xeu
 
-RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge WococoToRococo \
+RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge wococo-to-rococo \
 	--source-host 127.0.0.1 \
 	--source-port 9944 \
 	--target-host 127.0.0.1 \
 	--target-port 9955 \
 	--target-signer //Alice
 
-RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers WococoToRococo \
+RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers wococo-to-rococo \
 	--source-host 127.0.0.1 \
 	--source-port 9944 \
 	--target-host 127.0.0.1 \
diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh
index 5b298a149f8a30a6346db4b187b6d1315cfb0ed9..d420dc56c263f66a95401fd49a276bdcfe68bd9c 100755
--- a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh
+++ b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh
@@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}"
 RIALTO_PORT="${RIALTO_PORT:-9944}"
 
 RUST_LOG=bridge=debug \
-./target/debug/substrate-relay relay-messages MillauToRialto \
+./target/debug/substrate-relay relay-messages millau-to-rialto \
 	--lane 00000000 \
 	--source-host localhost \
 	--source-port $MILLAU_PORT \
diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh
index 616697192b961e33c9ec530e1ddb9f0a6659b1c6..0cd73c00454d9ff5d5c408f49da8c66d4a68a0c3 100755
--- a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh
+++ b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh
@@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}"
 RIALTO_PORT="${RIALTO_PORT:-9944}"
 
 RUST_LOG=bridge=debug \
-./target/debug/substrate-relay relay-messages RialtoToMillau \
+./target/debug/substrate-relay relay-messages rialto-to-millau \
 	--lane 00000000 \
 	--source-host localhost \
 	--source-port $RIALTO_PORT \
diff --git a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh
index 59c75de3899fb482eb39e8c155e2381b11156c0c..8b18cff2b53c22081d06732005ea8ecb50dc4528 100755
--- a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh
+++ b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh
@@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}"
 RIALTO_PORT="${RIALTO_PORT:-9944}"
 
 RUST_LOG=bridge=debug \
-./target/debug/substrate-relay init-bridge MillauToRialto \
+./target/debug/substrate-relay init-bridge millau-to-rialto \
 	--source-host localhost \
 	--source-port $MILLAU_PORT \
 	--target-host localhost \
@@ -18,7 +18,7 @@ RUST_LOG=bridge=debug \
 
 sleep 5
 RUST_LOG=bridge=debug \
-./target/debug/substrate-relay relay-headers MillauToRialto \
+./target/debug/substrate-relay relay-headers millau-to-rialto \
 	--source-host localhost \
 	--source-port $MILLAU_PORT \
 	--target-host localhost \
diff --git a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh
index 6382cdca82374bafd22f52e88db0a4b1cd7c4cd4..c66c994f06ab675420b4863b1090e3dae7302de6 100755
--- a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh
+++ b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh
@@ -9,7 +9,7 @@ MILLAU_PORT="${MILLAU_PORT:-9945}"
 RIALTO_PORT="${RIALTO_PORT:-9944}"
 
 RUST_LOG=bridge=debug \
-./target/debug/substrate-relay init-bridge RialtoToMillau \
+./target/debug/substrate-relay init-bridge rialto-to-millau \
 	--target-host localhost \
 	--target-port $MILLAU_PORT \
 	--source-host localhost \
@@ -18,7 +18,7 @@ RUST_LOG=bridge=debug \
 
 sleep 5
 RUST_LOG=bridge=debug \
-./target/debug/substrate-relay relay-headers RialtoToMillau \
+./target/debug/substrate-relay relay-headers rialto-to-millau \
 	--target-host localhost \
 	--target-port $MILLAU_PORT \
 	--source-host localhost \
diff --git a/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh b/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0898978096d33111f53d2a72119fa75dc2e1ca7d
--- /dev/null
+++ b/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -xeu
+
+/home/user/rialto-bridge-node build-spec \
+	--chain local \
+	--raw \
+	--disable-default-bootnode \
+	> /rialto-share/rialto-relaychain-spec-raw.json
+
+# we're using local driver + tmpfs for shared `/rialto-share` volume, which is populated
+# by the container running this script. If this script ends, the volume will be detached
+# and our chain spec will be lost when it goes online again. Hence the never-ending
+# script which keeps volume online until container is stopped.
+tail -f /dev/null
diff --git a/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh b/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..172502327c9a071a3581a1a60dff955667f6e29f
--- /dev/null
+++ b/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -xeu
+
+sleep 60
+curl -v http://rialto-node-alice:9933/health
+curl -v http://rialto-parachain-collator-alice:9933/health
+
+/home/user/substrate-relay register-parachain rialto-parachain \
+	--parachain-host rialto-parachain-collator-alice \
+	--parachain-port 9944 \
+	--relaychain-host rialto-node-alice \
+	--relaychain-port 9944 \
+	--relaychain-signer //Alice
diff --git a/polkadot/bridges/deployments/networks/millau.yml b/polkadot/bridges/deployments/networks/millau.yml
index 54790579f1c165d1991bbcd83b2eadf44986421f..001f3a060947d6d60f3f86afdf6397b330177a51 100644
--- a/polkadot/bridges/deployments/networks/millau.yml
+++ b/polkadot/bridges/deployments/networks/millau.yml
@@ -20,7 +20,7 @@ services:
       - --unsafe-rpc-external
       - --unsafe-ws-external
     environment:
-      RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace
+      RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,sc_basic_authorship=trace
     ports:
       - "19933:9933"
       - "19944:9944"
diff --git a/polkadot/bridges/deployments/networks/rialto-parachain.yml b/polkadot/bridges/deployments/networks/rialto-parachain.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b2d2188f1babb2406d98c95cb1f5cc4e78a1ef47
--- /dev/null
+++ b/polkadot/bridges/deployments/networks/rialto-parachain.yml
@@ -0,0 +1,90 @@
+# Compose file for quickly spinning up a local instance of the Rialto Parachain network.
+#
+# Since Rialto Parachain is unusable without Rialto, this file depends on some Rialto
+# network nodes.
+version: '3.5'
+services:
+  rialto-parachain-collator-alice: &rialto-parachain-collator
+    image: paritytech/rialto-parachain-collator
+    entrypoint: >
+      /home/user/rialto-parachain-collator
+      --alice
+      --collator
+      --force-authoring
+      --parachain-id 2000
+      --rpc-port 9933
+      --ws-port 9944
+      --rpc-cors=all
+      --unsafe-rpc-external
+      --unsafe-ws-external
+      --
+      --execution wasm
+      --chain /rialto-share/rialto-relaychain-spec-raw.json
+      --rpc-port 9934
+      --ws-port 9945
+    volumes:
+      - rialto-share:/rialto-share:z
+    environment:
+      RUST_LOG: runtime=trace,rpc=trace,txpool=trace,parachain=trace,parity_ws=trace
+    depends_on:
+      - rialto-chainspec-exporter
+    ports:
+      - "20433:9933"
+      - "20444:9944"
+
+  rialto-parachain-collator-bob:
+    <<: *rialto-parachain-collator
+    entrypoint: >
+      /home/user/rialto-parachain-collator
+      --bob
+      --collator
+      --force-authoring
+      --parachain-id 2000
+      --rpc-port 9933
+      --ws-port 9944
+      --rpc-cors=all
+      --unsafe-rpc-external
+      --unsafe-ws-external
+      --
+      --execution wasm
+      --chain /rialto-share/rialto-relaychain-spec-raw.json
+      --rpc-port 9934
+      --ws-port 9945
+    ports:
+      - "20533:9933"
+      - "20544:9944"
+
+  rialto-parachain-collator-charlie:
+    <<: *rialto-parachain-collator
+    entrypoint: >
+      /home/user/rialto-parachain-collator
+      --charlie
+      --collator
+      --force-authoring
+      --parachain-id 2000
+      --rpc-port 9933
+      --ws-port 9944
+      --rpc-cors=all
+      --unsafe-rpc-external
+      --unsafe-ws-external
+      --
+      --execution wasm
+      --chain /rialto-share/rialto-relaychain-spec-raw.json
+      --rpc-port 9934
+      --ws-port 9945
+    ports:
+      - "20633:9933"
+      - "20644:9944"
+
+  rialto-parachain-registrar:
+    image: paritytech/substrate-relay
+    entrypoint: /entrypoints/rialto-parachain-registrar-entrypoint.sh
+    volumes:
+      - ./networks/entrypoints:/entrypoints
+      - rialto-share:/rialto-share:z
+    environment:
+      RUST_LOG: bridge=trace
+    depends_on:
+      - rialto-node-alice
+      - rialto-parachain-collator-alice
+
diff --git a/polkadot/bridges/deployments/networks/rialto.yml b/polkadot/bridges/deployments/networks/rialto.yml
index 3039d7c33bcd41bac0397b7ac96ea4f1aa21bec0..9b902a1ca28a7babf7385665f76391a29e76ffb5 100644
--- a/polkadot/bridges/deployments/networks/rialto.yml
+++ b/polkadot/bridges/deployments/networks/rialto.yml
@@ -85,3 +85,20 @@ services:
     ports:
       - "10333:9933"
       - "10344:9944"
+
+  rialto-chainspec-exporter:
+    image: paritytech/rialto-bridge-node
+    entrypoint: /entrypoints/rialto-chainspec-exporter-entrypoint.sh
+    volumes:
+      - ./networks/entrypoints:/entrypoints
+      - rialto-share:/rialto-share:z
+
+# we're using `/rialto-share` to expose Rialto chain spec to those who are interested. Right
+# now it is Rialto Parachain collator nodes. Local + tmpfs combination allows sharing writable
+# in-memory volumes, which are dropped when containers are stopped.
+volumes:
+  rialto-share:
+    driver: local
+    driver_opts:
+      type: "tmpfs"
+      device: "tmpfs"
diff --git a/polkadot/bridges/deployments/run.sh b/polkadot/bridges/deployments/run.sh
index a79638352a383e11c5e40823d353594480ccd5ab..a82e0985c5e53644cbcbf9eed7857b50f3c6d6db 100755
--- a/polkadot/bridges/deployments/run.sh
+++ b/polkadot/bridges/deployments/run.sh
@@ -36,16 +36,18 @@ function show_help () {
   echo " "
   echo "Options:"
   echo "  --no-monitoring                            Disable monitoring"
+  echo "  --no-ui                                    Disable UI"
   echo " "
   echo "You can start multiple bridges at once by passing several bridge names:"
   echo "  ./run.sh poa-rialto rialto-millau westend-millau [stop|update]"
   exit 1
 }
 
-RIALTO=' -f ./networks/rialto.yml'
+RIALTO=' -f ./networks/rialto.yml -f ./networks/rialto-parachain.yml'
 MILLAU=' -f ./networks/millau.yml'
 ETH_POA=' -f ./networks/eth-poa.yml'
 MONITORING=' -f ./monitoring/docker-compose.yml'
+UI=' -f ./ui/docker-compose.yml'
 
 BRIDGES=()
 NETWORKS=''
@@ -58,6 +60,11 @@ do
       shift
       continue
       ;;
+    --no-ui)
+      UI=""
+      shift
+      continue
+      ;;
     poa-rialto)
       BRIDGES+=($i)
       NETWORKS+=${RIALTO}
@@ -94,7 +101,7 @@ if [ ${#BRIDGES[@]} -eq 0 ]; then
   show_help "Missing bridge name."
 fi
 
-COMPOSE_FILES=$NETWORKS$MONITORING
+COMPOSE_FILES=$NETWORKS$MONITORING$UI
 
 # Compose looks for .env files in the the current directory by default, we don't want that
 COMPOSE_ARGS="--project-directory ."
diff --git a/polkadot/bridges/deployments/types-millau.json b/polkadot/bridges/deployments/types-millau.json
index a15527f59d794d5f2025a18808e5f999b71fd78d..6d651b4c7cf733f01ee93c26bc0d981ffccf28b0 100644
--- a/polkadot/bridges/deployments/types-millau.json
+++ b/polkadot/bridges/deployments/types-millau.json
@@ -1,5 +1,7 @@
 {
   "--1": "Millau Types",
+  "MillauAddress": "AccountId",
+  "MillauLookupSource": "AccountId",
   "MillauBalance": "u64",
   "MillauBlockHash": "H512",
   "MillauBlockNumber": "u64",
@@ -25,6 +27,8 @@
     }
   },
   "--2": "Rialto Types",
+  "RialtoAddress": "MultiAddress",
+  "RialtoLookupSource": "MultiAddress",
   "RialtoBalance": "u128",
   "RialtoBlockHash": "H256",
   "RialtoBlockNumber": "u32",
@@ -50,8 +54,6 @@
     }
   },
   "--3": "Common types",
-  "Address": "AccountId",
-  "LookupSource": "AccountId",
   "AccountSigner": "MultiSigner",
   "SpecVersion": "u32",
   "RelayerId": "AccountId",
@@ -70,7 +72,7 @@
   "ChainId": "Id",
   "LaneId": "Id",
   "MessageNonce": "u64",
-  "MessageId": "(Id, u64)",
+  "BridgeMessageId": "(Id, u64)",
   "MessageKey": {
     "lane_id": "LaneId",
     "nonce:": "MessageNonce"
@@ -90,9 +92,9 @@
     "dispatch_results": "BitVec"
   },
   "OutboundLaneData": {
-    "latest_generated_nonce": "MessageNonce",
+    "oldest_unpruned_nonce": "MessageNonce",
     "latest_received_nonce": "MessageNonce",
-    "oldest_unpruned_nonce": "MessageNonce"
+    "latest_generated_nonce": "MessageNonce"
   },
   "MessageData": {
     "payload": "MessagePayload",
@@ -172,6 +174,8 @@
     "commit": "Commit",
     "votes_ancestries": "Vec<BridgedHeader>"
   },
+  "Address": "MillauAddress",
+  "LookupSource": "MillauLookupSource",
   "Fee": "MillauBalance",
   "Balance": "MillauBalance",
   "Hash": "MillauBlockHash",
diff --git a/polkadot/bridges/deployments/types-rialto.json b/polkadot/bridges/deployments/types-rialto.json
index 5375e43aea451dde9082ab2791f38db27e908d9b..a574e1178936cfffc1b12dba32cd47b522459f9f 100644
--- a/polkadot/bridges/deployments/types-rialto.json
+++ b/polkadot/bridges/deployments/types-rialto.json
@@ -1,5 +1,7 @@
 {
   "--1": "Millau Types",
+  "MillauAddress": "AccountId",
+  "MillauLookupSource": "AccountId",
   "MillauBalance": "u64",
   "MillauBlockHash": "H512",
   "MillauBlockNumber": "u64",
@@ -25,6 +27,8 @@
     }
   },
   "--2": "Rialto Types",
+  "RialtoAddress": "MultiAddress",
+  "RialtoLookupSource": "MultiAddress",
   "RialtoBalance": "u128",
   "RialtoBlockHash": "H256",
   "RialtoBlockNumber": "u32",
@@ -50,8 +54,6 @@
     }
   },
   "--3": "Common types",
-  "Address": "AccountId",
-  "LookupSource": "AccountId",
   "AccountSigner": "MultiSigner",
   "SpecVersion": "u32",
   "RelayerId": "AccountId",
@@ -70,7 +72,7 @@
   "ChainId": "Id",
   "LaneId": "Id",
   "MessageNonce": "u64",
-  "MessageId": "(Id, u64)",
+  "BridgeMessageId": "(Id, u64)",
   "MessageKey": {
     "lane_id": "LaneId",
     "nonce:": "MessageNonce"
@@ -90,9 +92,9 @@
     "dispatch_results": "BitVec"
   },
   "OutboundLaneData": {
-    "latest_generated_nonce": "MessageNonce",
+    "oldest_unpruned_nonce": "MessageNonce",
     "latest_received_nonce": "MessageNonce",
-    "oldest_unpruned_nonce": "MessageNonce"
+    "latest_generated_nonce": "MessageNonce"
   },
   "MessageData": {
     "payload": "MessagePayload",
@@ -172,6 +174,8 @@
     "commit": "Commit",
     "votes_ancestries": "Vec<BridgedHeader>"
   },
+  "Address": "RialtoAddress",
+  "LookupSource": "RialtoLookupSource",
   "Fee": "RialtoBalance",
   "Balance": "RialtoBalance",
   "BlockHash": "RialtoBlockHash",
@@ -183,5 +187,6 @@
     "_enum": {
       "RialtoToMillauConversionRate": "u128"
     }
-  }
+  },
+  "ValidationCodeHash": "H256"
 }
diff --git a/polkadot/bridges/deployments/types-rococo.json b/polkadot/bridges/deployments/types-rococo.json
index 6490266809f59425d9ee22c70f517bd6dcae7fe0..6f4592a8d5733712551b3e350f4b037ac148f9bc 100644
--- a/polkadot/bridges/deployments/types-rococo.json
+++ b/polkadot/bridges/deployments/types-rococo.json
@@ -1,17 +1,19 @@
 {
   "--1": "Rococo Types",
+  "RococoAddress": "AccountId",
+  "RococoLookupSource": "AccountId",
   "RococoBalance": "u128",
   "RococoBlockHash": "H256",
   "RococoBlockNumber": "u32",
   "RococoHeader": "Header",
   "--2": "Wococo Types",
+  "WococoAddress": "AccountId",
+  "WococoLookupSource": "AccountId",
   "WococoBalance": "RococoBalance",
   "WococoBlockHash": "RococoBlockHash",
   "WococoBlockNumber": "RococoBlockNumber",
   "WococoHeader": "RococoHeader",
   "--3": "Common types",
-  "Address": "AccountId",
-  "LookupSource": "AccountId",
   "AccountSigner": "MultiSigner",
   "SpecVersion": "u32",
   "RelayerId": "AccountId",
@@ -30,7 +32,7 @@
   "ChainId": "Id",
   "LaneId": "Id",
   "MessageNonce": "u64",
-  "MessageId": "(Id, u64)",
+  "BridgeMessageId": "(Id, u64)",
   "MessageKey": {
     "lane_id": "LaneId",
     "nonce:": "MessageNonce"
@@ -50,9 +52,9 @@
     "dispatch_results": "BitVec"
   },
   "OutboundLaneData": {
-    "latest_generated_nonce": "MessageNonce",
+    "oldest_unpruned_nonce": "MessageNonce",
     "latest_received_nonce": "MessageNonce",
-    "oldest_unpruned_nonce": "MessageNonce"
+    "latest_generated_nonce": "MessageNonce"
   },
   "MessageData": {
     "payload": "MessagePayload",
@@ -132,6 +134,8 @@
     "commit": "Commit",
     "votes_ancestries": "Vec<BridgedHeader>"
   },
+  "Address": "RococoAddress",
+  "LookupSource": "RococoLookupSource",
   "Fee": "RococoBalance",
   "Balance": "RococoBalance",
   "BlockHash": "RococoBlockHash",
diff --git a/polkadot/bridges/deployments/types-wococo.json b/polkadot/bridges/deployments/types-wococo.json
index 1a4084e94cfcadd6b908165d70c2ece083b4c114..562f08afa9c01fe491fdd458bc122daff0cedcf2 100644
--- a/polkadot/bridges/deployments/types-wococo.json
+++ b/polkadot/bridges/deployments/types-wococo.json
@@ -1,17 +1,19 @@
 {
   "--1": "Rococo Types",
+  "RococoAddress": "AccountId",
+  "RococoLookupSource": "AccountId",
   "RococoBalance": "u128",
   "RococoBlockHash": "H256",
   "RococoBlockNumber": "u32",
   "RococoHeader": "Header",
   "--2": "Wococo Types",
+  "WococoAddress": "AccountId",
+  "WococoLookupSource": "AccountId",
   "WococoBalance": "RococoBalance",
   "WococoBlockHash": "RococoBlockHash",
   "WococoBlockNumber": "RococoBlockNumber",
   "WococoHeader": "RococoHeader",
   "--3": "Common types",
-  "Address": "AccountId",
-  "LookupSource": "AccountId",
   "AccountSigner": "MultiSigner",
   "SpecVersion": "u32",
   "RelayerId": "AccountId",
@@ -30,7 +32,7 @@
   "ChainId": "Id",
   "LaneId": "Id",
   "MessageNonce": "u64",
-  "MessageId": "(Id, u64)",
+  "BridgeMessageId": "(Id, u64)",
   "MessageKey": {
     "lane_id": "LaneId",
     "nonce:": "MessageNonce"
@@ -50,9 +52,9 @@
     "dispatch_results": "BitVec"
   },
   "OutboundLaneData": {
-    "latest_generated_nonce": "MessageNonce",
+    "oldest_unpruned_nonce": "MessageNonce",
     "latest_received_nonce": "MessageNonce",
-    "oldest_unpruned_nonce": "MessageNonce"
+    "latest_generated_nonce": "MessageNonce"
   },
   "MessageData": {
     "payload": "MessagePayload",
@@ -132,6 +134,8 @@
     "commit": "Commit",
     "votes_ancestries": "Vec<BridgedHeader>"
   },
+  "Address": "WococoAddress",
+  "LookupSource": "WococoLookupSource",
   "Fee": "WococoBalance",
   "Balance": "WococoBalance",
   "Hash": "WococoBlockHash",
diff --git a/polkadot/bridges/deployments/types/common.json b/polkadot/bridges/deployments/types/common.json
index d3395ea687fddd98baeb72ff9a1abda7e28d795d..4e129f7132beddb0bf1e724846099bf84e3903fe 100644
--- a/polkadot/bridges/deployments/types/common.json
+++ b/polkadot/bridges/deployments/types/common.json
@@ -1,7 +1,5 @@
 {
 	"--3": "Common types",
-	"Address": "AccountId",
-	"LookupSource": "AccountId",
 	"AccountSigner": "MultiSigner",
 	"SpecVersion": "u32",
 	"RelayerId": "AccountId",
@@ -20,7 +18,7 @@
 	"ChainId": "Id",
 	"LaneId": "Id",
 	"MessageNonce": "u64",
-	"MessageId": "(Id, u64)",
+	"BridgeMessageId": "(Id, u64)",
 	"MessageKey": {
 		"lane_id": "LaneId",
 		"nonce:": "MessageNonce"
@@ -40,9 +38,10 @@
 		"dispatch_results": "BitVec"
 	},
 	"OutboundLaneData": {
-		"latest_generated_nonce": "MessageNonce",
+		"oldest_unpruned_nonce": "MessageNonce",
 		"latest_received_nonce": "MessageNonce",
-		"oldest_unpruned_nonce": "MessageNonce"
+		"latest_generated_nonce": "MessageNonce"
+
 	},
 	"MessageData": {
 		"payload": "MessagePayload",
diff --git a/polkadot/bridges/deployments/types/millau.json b/polkadot/bridges/deployments/types/millau.json
index f738701263d533bc81825d5deb64c879f540aa95..589d5619df453162c3d61c03824b5796244da6d6 100644
--- a/polkadot/bridges/deployments/types/millau.json
+++ b/polkadot/bridges/deployments/types/millau.json
@@ -1,4 +1,6 @@
 {
+	"Address": "MillauAddress",
+	"LookupSource": "MillauLookupSource",
 	"Fee": "MillauBalance",
 	"Balance": "MillauBalance",
 	"Hash": "MillauBlockHash",
diff --git a/polkadot/bridges/deployments/types/rialto-millau.json b/polkadot/bridges/deployments/types/rialto-millau.json
index 96efb84fc3bb9cc50ca9034c1c381b2a98225944..971cf666d479e9cb0e6d3f4dd85f4e47d1ae5a13 100644
--- a/polkadot/bridges/deployments/types/rialto-millau.json
+++ b/polkadot/bridges/deployments/types/rialto-millau.json
@@ -1,5 +1,7 @@
 {
 	"--1": "Millau Types",
+	"MillauAddress": "AccountId",
+	"MillauLookupSource": "AccountId",
 	"MillauBalance": "u64",
 	"MillauBlockHash": "H512",
 	"MillauBlockNumber": "u64",
@@ -25,6 +27,8 @@
 		}
 	},
 	"--2": "Rialto Types",
+	"RialtoAddress": "MultiAddress",
+	"RialtoLookupSource": "MultiAddress",
 	"RialtoBalance": "u128",
 	"RialtoBlockHash": "H256",
 	"RialtoBlockNumber": "u32",
diff --git a/polkadot/bridges/deployments/types/rialto.json b/polkadot/bridges/deployments/types/rialto.json
index fe1ba31e8aa3596a54ab76339fc08959886aa90c..77c30b7cc2d7b052bbc635e900a583a8aa6dc55c 100644
--- a/polkadot/bridges/deployments/types/rialto.json
+++ b/polkadot/bridges/deployments/types/rialto.json
@@ -1,4 +1,6 @@
 {
+	"Address": "RialtoAddress",
+	"LookupSource": "RialtoLookupSource",
 	"Fee": "RialtoBalance",
 	"Balance": "RialtoBalance",
 	"BlockHash": "RialtoBlockHash",
@@ -10,5 +12,6 @@
 		"_enum": {
 			"RialtoToMillauConversionRate": "u128"
 		}
-	}
+	},
+	"ValidationCodeHash": "H256"
 }
diff --git a/polkadot/bridges/deployments/types/rococo-wococo.json b/polkadot/bridges/deployments/types/rococo-wococo.json
index b1c4cfa21b92cf2acb45c90c34aeaf2168d1e4ee..e0864c2ffb0b3181408624d154617f66048287be 100644
--- a/polkadot/bridges/deployments/types/rococo-wococo.json
+++ b/polkadot/bridges/deployments/types/rococo-wococo.json
@@ -1,10 +1,14 @@
 {
 	"--1": "Rococo Types",
+	"RococoAddress": "AccountId",
+	"RococoLookupSource": "AccountId",
 	"RococoBalance": "u128",
 	"RococoBlockHash": "H256",
 	"RococoBlockNumber": "u32",
 	"RococoHeader": "Header",
 	"--2": "Wococo Types",
+	"WococoAddress": "AccountId",
+	"WococoLookupSource": "AccountId",
 	"WococoBalance": "RococoBalance",
 	"WococoBlockHash": "RococoBlockHash",
 	"WococoBlockNumber": "RococoBlockNumber",
diff --git a/polkadot/bridges/deployments/types/rococo.json b/polkadot/bridges/deployments/types/rococo.json
index 4576378fd47920ea2a979fdbb0480abbb23f624c..fa1bf2750095d976404089d5f09773d9096df42f 100644
--- a/polkadot/bridges/deployments/types/rococo.json
+++ b/polkadot/bridges/deployments/types/rococo.json
@@ -1,4 +1,6 @@
 {
+	"Address": "RococoAddress",
+	"LookupSource": "RococoLookupSource",
 	"Fee": "RococoBalance",
 	"Balance": "RococoBalance",
 	"BlockHash": "RococoBlockHash",
diff --git a/polkadot/bridges/deployments/types/wococo.json b/polkadot/bridges/deployments/types/wococo.json
index cc01a6ccecfb9e613893b8cc77b57e2375cc2a65..7c7b4ff27688ee34db6763e153dd910f3f4aadc9 100644
--- a/polkadot/bridges/deployments/types/wococo.json
+++ b/polkadot/bridges/deployments/types/wococo.json
@@ -1,4 +1,6 @@
 {
+	"Address": "WococoAddress",
+	"LookupSource": "WococoLookupSource",
 	"Fee": "WococoBalance",
 	"Balance": "WococoBalance",
 	"Hash": "WococoBlockHash",
diff --git a/polkadot/bridges/deployments/ui/README.md b/polkadot/bridges/deployments/ui/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad946fc699bf89cc05d675267013fc77ff4db36a
--- /dev/null
+++ b/polkadot/bridges/deployments/ui/README.md
@@ -0,0 +1,23 @@
+# bridges-ui
+
+This is a Bridges UI docker configuration file. The source of the Bridges UI code
+can be found in [the repository](https://github.com/paritytech/parity-bridges-ui).
+The CI should create and publish a docker image that is used by this configuration
+file, so that the code is always using the latest version.
+The UI is configured to point to local Rialto and Millau nodes to retrieve the required
+data.
+
+This image can be used together with `nginx-proxy` to expose the UI externally. See
+`VIRTUAL_*` and `LETSENCRYPT_*` environment variables.
+
+After starting, the UI is available at `http://localhost:8080`.
+
+## How to?
+
+In current directory:
+```bash
+docker-compose up -d
+```
+
+Then start `rialto` & `millau` networks with the same command (one folder up) or
+run the full setup by using `../run.sh` script.
diff --git a/polkadot/bridges/deployments/ui/docker-compose.yml b/polkadot/bridges/deployments/ui/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0a35fa98e2347c57807b0834e674698df13660d8
--- /dev/null
+++ b/polkadot/bridges/deployments/ui/docker-compose.yml
@@ -0,0 +1,13 @@
+version: '3.5'
+services:
+  bridges-ui:
+    image: paritytech/parity-bridges-ui
+    environment:
+      VIRTUAL_HOST: ui.brucke.link
+      VIRTUAL_PORT: 80
+      LETSENCRYPT_HOST: ui.brucke.link
+      LETSENCRYPT_EMAIL: admin@parity.io
+      CHAIN_1_SUBSTRATE_PROVIDER: ws://localhost:9944
+      CHAIN_2_SUBSTRATE_PROVIDER: ws://localhost:19944
+    ports:
+      - "8080:80"
diff --git a/polkadot/bridges/docs/high-level-overview.md b/polkadot/bridges/docs/high-level-overview.md
index 9ca3ca42ff5e51eab3c1cbd5ee9181a5384f9ed0..2642c20c86abbc723f385d96b1f02e8179020f12 100644
--- a/polkadot/bridges/docs/high-level-overview.md
+++ b/polkadot/bridges/docs/high-level-overview.md
@@ -48,33 +48,21 @@ High level sequence charts of the process can be found in [a separate document](
 
 ### Substrate (GRANDPA) Header Sync
 
-The header sync pallet (`pallet-substrate-bridge`) is an on-chain light client for chains which use
-GRANDPA finality. It is part of the target chain's runtime, and accepts headers from the source
-chain. Its main goals are to accept valid headers, track GRANDPA finality set changes, and verify
-GRANDPA finality proofs (a.k.a justifications).
+The header sync pallet (`pallet-bridge-grandpa`) is an on-chain light client for chains which use
+GRANDPA finality. It is part of the target chain's runtime, and accepts finality proofs from the source
+chain. It verifies GRANDPA finality proofs (a.k.a. justifications) and tracks GRANDPA finality set changes.
 
 The pallet does not care about what block production mechanism is used for the source chain
-(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. Due to this it is possible for
-the pallet to import (but not necessarily finalize) headers which are _not_ valid according to the
-source chain's block production mechanism.
+(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. In fact the pallet does not
+necessarily store all produced headers, we only import headers with valid GRANDPA justifications.
 
-The pallet has support for tracking forks and uses the longest chain rule to determine what the
-canonical chain is. The pallet allows headers to be imported on a different fork from the canonical
-one as long as the headers being imported don't conflict with already finalized headers (for
-example, it will not allow importing a header at a lower height than the best finalized header).
-
-When tracking authority set changes, the pallet - unlike the full GRANDPA protocol - does not
-support tracking multiple authority set changes across forks. Each fork can have at most one pending
-authority set change. This is done to prevent DoS attacks if GRANDPA on the source chain were to
-stall for a long time (the pallet would have to do a lot of expensive ancestry checks to catch up).
-
-Referer to the [pallet documentation](../modules/substrate/src/lib.rs) for more details.
+Refer to the [pallet documentation](../modules/grandpa/src/lib.rs) for more details.
 
 #### Header Relayer strategy
 
 There is currently no reward strategy for the relayers at all. They also are not required to be
 staked or registered on-chain, unlike in other bridge designs. We consider the header sync to be
-an essential part of the bridge and the incentivisation should be happening on the higher layers.
+an essential part of the bridge and the incentivization should be happening on the higher layers.
 
 At the moment, signed transactions are the only way to submit headers to the header sync pallet.
 However, in the future we would like to use  unsigned transactions for headers delivery. This will
@@ -110,7 +98,7 @@ Users of the pallet add their messages to an "outbound lane" on the source chain
 finalized message relayers are responsible for reading the current queue of messages and submitting
 some (or all) of them to the "inbound lane" of the target chain. Each message has a `nonce`
 associated with it, which serves as the ordering of messages. The inbound lane stores the last
-delivered nonce to prevent replaying messages. To succesfuly deliver the message to the inbound lane
+delivered nonce to prevent replaying messages. To successfully deliver the message to the inbound lane
 on target chain the relayer has to present present a storage proof which shows that the message was
 part of the outbound lane on the source chain.
 
diff --git a/polkadot/bridges/docs/send-message.md b/polkadot/bridges/docs/send-message.md
index 91d3bfd976b58f4a6cf055445181a4ba51d8bdfd..6984c56d67f2a87c7b5d3939750db4fccc8d85c1 100644
--- a/polkadot/bridges/docs/send-message.md
+++ b/polkadot/bridges/docs/send-message.md
@@ -46,22 +46,22 @@ FLAGS:
 
 SUBCOMMANDS:
     help                Prints this message or the help of the given subcommand(s)
-    MillauToRialto      Submit message to given Millau -> Rialto lane
-    RialtoToMillau      Submit message to given Rialto -> Millau lane
+    millau-to-rialto      Submit message to given Millau -> Rialto lane
+    rialto-to-millau      Submit message to given Rialto -> Millau lane
 
 ```
 Messages are send from a source chain to a target chain using a so called `message lane`. Message lanes handle
 both, message transport and message dispatch. There is one command for submitting a message to each of the two
-available bridges, namely `MillauToRialto` and `RialtoToMillau`.
+available bridges, namely `millau-to-rialto` and `rialto-to-millau`.
 
 Submitting a message requires a number of arguments to be provided. Those arguments are essentially the same
-for both submit message commands, hence only the output for `MillauToRialto` is shown below.
+for both submit message commands, hence only the output for `millau-to-rialto` is shown below.
 
 ```
 Submit message to given Millau -> Rialto lane
 
 USAGE:
-    substrate-relay send-message MillauToRialto [OPTIONS] --lane <lane> --source-host <source-host> --source-port <source-port> --source-signer <source-signer> --origin <origin> --target-signer <target-signer> <SUBCOMMAND>
+    substrate-relay send-message millau-to-rialto [OPTIONS] --lane <lane> --source-host <source-host> --source-port <source-port> --source-signer <source-signer> --origin <origin> --target-signer <target-signer> <SUBCOMMAND>
 
 FLAGS:
     -h, --help       Prints help information
@@ -104,7 +104,7 @@ Usage of the arguments is best explained with an example. Below you can see, how
 would look like:
 
 ```
-substrate-relay send-message MillauToRialto \
+substrate-relay send-message millau-to-rialto \
 		--source-host=127.0.0.1 \
 		--source-port=10946 \
 		--source-signer=//Dave \
diff --git a/polkadot/bridges/fuzz/storage-proof/Cargo.toml b/polkadot/bridges/fuzz/storage-proof/Cargo.toml
index 43e58ddb73e98df6c2ddba1811e490790ea0c622..c4da57b255c83de4c8c14d77b2cd46e06785dcf7 100644
--- a/polkadot/bridges/fuzz/storage-proof/Cargo.toml
+++ b/polkadot/bridges/fuzz/storage-proof/Cargo.toml
@@ -8,27 +8,17 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-finality-grandpa = "0.14.4"
-hash-db = "0.15.2"
 honggfuzz = "0.5.54"
 log = "0.4.0"
 env_logger = "0.8.3"
 
 # Bridge Dependencies
 
-bp-header-chain = { path = "../../primitives/header-chain" }
 bp-runtime = { path = "../../primitives/runtime" }
-bp-test-utils = { path = "../../primitives/test-utils" }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/fuzz/storage-proof/README.md b/polkadot/bridges/fuzz/storage-proof/README.md
index da3c7b1565e037c7fd6b3a63b7d59289b357ce34..1eeec7562a9614bebe9738701e0929852c94ac5c 100644
--- a/polkadot/bridges/fuzz/storage-proof/README.md
+++ b/polkadot/bridges/fuzz/storage-proof/README.md
@@ -6,7 +6,10 @@ Install dependencies:
 ```
 $ sudo apt install build-essential binutils-dev libunwind-dev
 ```
-
+or on nix:
+```
+$ nix-shell -p honggfuzz
+```
 
 Install `cargo hfuzz` plugin:
 ```
@@ -29,4 +32,3 @@ HFUZZ_RUN_ARGS="-t 1 -n 12 -v -N 1000000 --exit_upon_crash" cargo hfuzz run exam
 ```
 
 More details in the [official documentation](https://docs.rs/honggfuzz/0.5.52/honggfuzz/#about-honggfuzz).
-
diff --git a/polkadot/bridges/fuzz/storage-proof/src/main.rs b/polkadot/bridges/fuzz/storage-proof/src/main.rs
index 18be72e72f228b98ae6e81828de57be2d8b9ed0a..42636a65c3dca676be449378713194a247b4442a 100644
--- a/polkadot/bridges/fuzz/storage-proof/src/main.rs
+++ b/polkadot/bridges/fuzz/storage-proof/src/main.rs
@@ -28,10 +28,8 @@ use sp_trie::StorageProof;
 use std::collections::HashMap;
 
 fn craft_known_storage_proof(input_vec: Vec<(Vec<u8>, Vec<u8>)>) -> (H256, StorageProof) {
-	let storage_proof_vec = vec![(
-		None,
-		input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect(),
-	)];
+	let storage_proof_vec =
+		vec![(None, input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect())];
 	log::info!("Storage proof vec {:?}", storage_proof_vec);
 	let backend = <InMemoryBackend<Blake2Hasher>>::from(storage_proof_vec);
 	let root = backend.storage_root(std::iter::empty()).0;
@@ -47,7 +45,7 @@ fn craft_known_storage_proof(input_vec: Vec<(Vec<u8>, Vec<u8>)>) -> (H256, Stora
 fn transform_into_unique(input_vec: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<(Vec<u8>, Vec<u8>)> {
 	let mut output_hashmap = HashMap::new();
 	let mut output_vec = Vec::new();
-	for key_value_pair in input_vec.clone() {
+	for key_value_pair in input_vec {
 		output_hashmap.insert(key_value_pair.0, key_value_pair.1); //Only 1 value per key
 	}
 	for (key, val) in output_hashmap.iter() {
@@ -59,18 +57,16 @@ fn transform_into_unique(input_vec: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<(Vec<u8>, Ve
 fn run_fuzzer() {
 	fuzz!(|input_vec: Vec<(Vec<u8>, Vec<u8>)>| {
 		if input_vec.is_empty() {
-			return;
+			return
 		}
 		let unique_input_vec = transform_into_unique(input_vec);
 		let (root, craft_known_storage_proof) = craft_known_storage_proof(unique_input_vec.clone());
-		let checker = <bp_runtime::StorageProofChecker<Blake2Hasher>>::new(root, craft_known_storage_proof)
-			.expect("Valid proof passed; qed");
+		let checker =
+			<bp_runtime::StorageProofChecker<Blake2Hasher>>::new(root, craft_known_storage_proof)
+				.expect("Valid proof passed; qed");
 		for key_value_pair in unique_input_vec {
 			log::info!("Reading value for pair {:?}", key_value_pair);
-			assert_eq!(
-				checker.read_value(&key_value_pair.0),
-				Ok(Some(key_value_pair.1.clone()))
-			);
+			assert_eq!(checker.read_value(&key_value_pair.0), Ok(Some(key_value_pair.1.clone())));
 		}
 	})
 }
diff --git a/polkadot/bridges/modules/currency-exchange/Cargo.toml b/polkadot/bridges/modules/currency-exchange/Cargo.toml
index 160a652e7c6735b58857808ce431a8d8d4ca7866..7595a35ab022abf0c21de12eb1fc40ee92a89a91 100644
--- a/polkadot/bridges/modules/currency-exchange/Cargo.toml
+++ b/polkadot/bridges/modules/currency-exchange/Cargo.toml
@@ -7,8 +7,9 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0", optional = true }
 
 # Bridge dependencies
@@ -18,11 +19,11 @@ bp-header-chain = { path = "../../primitives/header-chain", default-features = f
 
 # Substrate Dependencies
 
-frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
 
 [dev-dependencies]
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
@@ -38,6 +39,7 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 	"log/std",
+	"scale-info/std",
 	"serde",
 	"sp-runtime/std",
 	"sp-std/std",
diff --git a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs b/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs
index 74da4c1b7ec48a7c464e25c08876c35ea47bac8a..3a31da0d0a91223c9c1c46e5b819a985be091731 100644
--- a/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs
+++ b/polkadot/bridges/modules/currency-exchange/src/benchmarking.rs
@@ -19,11 +19,12 @@
 //! before invoking module calls.
 
 use super::{
-	Call, Config as CurrencyExchangeConfig, InclusionProofVerifier, Instance, Pallet as CurrencyExchangePallet,
+	Call, Config as CurrencyExchangeConfig, InclusionProofVerifier,
+	Pallet as CurrencyExchangePallet,
 };
 use sp_std::prelude::*;
 
-use frame_benchmarking::{account, benchmarks_instance};
+use frame_benchmarking::{account, benchmarks_instance_pallet};
 use frame_system::RawOrigin;
 
 const SEED: u32 = 0;
@@ -31,7 +32,7 @@ const WORST_TX_SIZE_FACTOR: u32 = 1000;
 const WORST_PROOF_SIZE_FACTOR: u32 = 1000;
 
 /// Pallet we're benchmarking here.
-pub struct Pallet<T: Config<I>, I: Instance>(CurrencyExchangePallet<T, I>);
+pub struct Pallet<T: Config<I>, I: 'static>(CurrencyExchangePallet<T, I>);
 
 /// Proof benchmarking parameters.
 pub struct ProofParams<Recipient> {
@@ -39,8 +40,8 @@ pub struct ProofParams<Recipient> {
 	pub recipient: Recipient,
 	/// When true, recipient must exists before import.
 	pub recipient_exists: bool,
-	/// When 0, transaction should have minimal possible size. When this value has non-zero value n,
-	/// transaction size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`.
+	/// When 0, transaction should have minimal possible size. When this value has non-zero value
+	/// n, transaction size should be (if possible) near to MIN_SIZE + n * SIZE_FACTOR.
 	pub transaction_size_factor: u32,
 	/// When 0, proof should have minimal possible size. When this value has non-zero value n,
 	/// proof size should be (if possible) near to `MIN_SIZE + n * SIZE_FACTOR`.
@@ -48,14 +49,14 @@ pub struct ProofParams<Recipient> {
 }
 
 /// Config that must be implemented by runtime.
-pub trait Config<I: Instance>: CurrencyExchangeConfig<I> {
+pub trait Config<I: 'static>: CurrencyExchangeConfig<I> {
 	/// Prepare proof for importing exchange transaction.
 	fn make_proof(
 		proof_params: ProofParams<Self::AccountId>,
 	) -> <<Self as CurrencyExchangeConfig<I>>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof;
 }
 
-benchmarks_instance! {
+benchmarks_instance_pallet! {
 	// Benchmark `import_peer_transaction` extrinsic with the best possible conditions:
 	// * Proof is the transaction itself.
 	// * Transaction has minimal size.
diff --git a/polkadot/bridges/modules/currency-exchange/src/lib.rs b/polkadot/bridges/modules/currency-exchange/src/lib.rs
index 79dd659283df31e3457382bff5cdde053fec903c..d2a3e61024c075daf0e16bbf1b768a810eb7fedc 100644
--- a/polkadot/bridges/modules/currency-exchange/src/lib.rs
+++ b/polkadot/bridges/modules/currency-exchange/src/lib.rs
@@ -19,11 +19,11 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use bp_currency_exchange::{
-	CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction, RecipientsMap,
+	CurrencyConverter, DepositInto, Error as ExchangeError, MaybeLockFundsTransaction,
+	RecipientsMap,
 };
 use bp_header_chain::InclusionProofVerifier;
-use frame_support::{decl_error, decl_module, decl_storage, ensure};
-use sp_runtime::DispatchResult;
+use frame_support::ensure;
 
 #[cfg(feature = "runtime-benchmarks")]
 pub mod benchmarking;
@@ -34,61 +34,53 @@ pub trait OnTransactionSubmitted<AccountId> {
 	fn on_valid_transaction_submitted(submitter: AccountId);
 }
 
-/// The module configuration trait
-pub trait Config<I = DefaultInstance>: frame_system::Config {
-	/// Handler for transaction submission result.
-	type OnTransactionSubmitted: OnTransactionSubmitted<Self::AccountId>;
-	/// Represents the blockchain that we'll be exchanging currency with.
-	type PeerBlockchain: InclusionProofVerifier;
-	/// Peer blockchain transaction parser.
-	type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction<
-		Transaction = <Self::PeerBlockchain as InclusionProofVerifier>::Transaction,
-	>;
-	/// Map between blockchains recipients.
-	type RecipientsMap: RecipientsMap<
-		PeerRecipient = <Self::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Recipient,
-		Recipient = Self::AccountId,
-	>;
-	/// This blockchain currency amount type.
-	type Amount;
-	/// Converter from peer blockchain currency type into current blockchain currency type.
-	type CurrencyConverter: CurrencyConverter<
-		SourceAmount = <Self::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Amount,
-		TargetAmount = Self::Amount,
-	>;
-	/// Something that could grant money.
-	type DepositInto: DepositInto<Recipient = Self::AccountId, Amount = Self::Amount>;
-}
+pub use pallet::*;
 
-decl_error! {
-	pub enum Error for Pallet<T: Config<I>, I: Instance> {
-		/// Invalid peer blockchain transaction provided.
-		InvalidTransaction,
-		/// Peer transaction has invalid amount.
-		InvalidAmount,
-		/// Peer transaction has invalid recipient.
-		InvalidRecipient,
-		/// Cannot map from peer recipient to this blockchain recipient.
-		FailedToMapRecipients,
-		/// Failed to convert from peer blockchain currency to this blockchain currency.
-		FailedToConvertCurrency,
-		/// Deposit has failed.
-		DepositFailed,
-		/// Deposit has partially failed (changes to recipient account were made).
-		DepositPartiallyFailed,
-		/// Transaction is not finalized.
-		UnfinalizedTransaction,
-		/// Transaction funds are already claimed.
-		AlreadyClaimed,
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// Handler for transaction submission result.
+		type OnTransactionSubmitted: OnTransactionSubmitted<Self::AccountId>;
+		/// Represents the blockchain that we'll be exchanging currency with.
+		type PeerBlockchain: InclusionProofVerifier;
+		/// Peer blockchain transaction parser.
+		type PeerMaybeLockFundsTransaction: MaybeLockFundsTransaction<
+			Transaction = <Self::PeerBlockchain as InclusionProofVerifier>::Transaction,
+		>;
+		/// Map between blockchains recipients.
+		type RecipientsMap: RecipientsMap<
+			PeerRecipient = <Self::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Recipient,
+			Recipient = Self::AccountId,
+		>;
+		/// This blockchain currency amount type.
+		type Amount;
+		/// Converter from peer blockchain currency type into current blockchain currency type.
+		type CurrencyConverter: CurrencyConverter<
+			SourceAmount = <Self::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Amount,
+			TargetAmount = Self::Amount,
+		>;
+		/// Something that could grant money.
+		type DepositInto: DepositInto<Recipient = Self::AccountId, Amount = Self::Amount>;
 	}
-}
 
-decl_module! {
-	pub struct Module<T: Config<I>, I: Instance = DefaultInstance> for enum Call where origin: T::Origin {
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {}
+
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Imports lock fund transaction of the peer blockchain.
-		#[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
+		#[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
 		pub fn import_peer_transaction(
-			origin,
+			origin: OriginFor<T>,
 			proof: <<T as Config<I>>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof,
 		) -> DispatchResult {
 			let submitter = frame_system::ensure_signed(origin)?;
@@ -101,7 +93,8 @@ decl_module! {
 			{
 				// if any changes were made to the storage, we can't just return error here, because
 				// otherwise the same proof may be imported again
-				let deposit_result = T::DepositInto::deposit_into(deposit.recipient, deposit.amount);
+				let deposit_result =
+					T::DepositInto::deposit_into(deposit.recipient, deposit.amount);
 				match deposit_result {
 					Ok(_) => (),
 					Err(ExchangeError::DepositPartiallyFailed) => (),
@@ -122,16 +115,41 @@ decl_module! {
 			Ok(())
 		}
 	}
-}
 
-decl_storage! {
-	trait Store for Pallet<T: Config<I>, I: Instance = DefaultInstance> as Bridge {
-		/// All transfers that have already been claimed.
-		Transfers: map hasher(blake2_128_concat) <T::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Id => ();
+	#[pallet::error]
+	pub enum Error<T, I = ()> {
+		/// Invalid peer blockchain transaction provided.
+		InvalidTransaction,
+		/// Peer transaction has invalid amount.
+		InvalidAmount,
+		/// Peer transaction has invalid recipient.
+		InvalidRecipient,
+		/// Cannot map from peer recipient to this blockchain recipient.
+		FailedToMapRecipients,
+		/// Failed to convert from peer blockchain currency to this blockchain currency.
+		FailedToConvertCurrency,
+		/// Deposit has failed.
+		DepositFailed,
+		/// Deposit has partially failed (changes to recipient account were made).
+		DepositPartiallyFailed,
+		/// Transaction is not finalized.
+		UnfinalizedTransaction,
+		/// Transaction funds are already claimed.
+		AlreadyClaimed,
 	}
+
+	/// All transfers that have already been claimed.
+	#[pallet::storage]
+	pub(super) type Transfers<T: Config<I>, I: 'static = ()> = StorageMap<
+		_,
+		Blake2_128Concat,
+		<T::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Id,
+		(),
+		ValueQuery,
+	>;
 }
 
-impl<T: Config<I>, I: Instance> Pallet<T, I> {
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// Returns true if currency exchange module is able to import given transaction proof in
 	/// its current state.
 	pub fn filter_transaction_proof(
@@ -144,14 +162,14 @@ impl<T: Config<I>, I: Instance> Pallet<T, I> {
 				err,
 			);
 
-			return false;
+			return false
 		}
 
 		true
 	}
 }
 
-impl<T: Config<I>, I: Instance> From<ExchangeError> for Error<T, I> {
+impl<T: Config<I>, I: 'static> From<ExchangeError> for Error<T, I> {
 	fn from(error: ExchangeError) -> Self {
 		match error {
 			ExchangeError::InvalidTransaction => Error::InvalidTransaction,
@@ -170,7 +188,7 @@ impl<AccountId> OnTransactionSubmitted<AccountId> for () {
 }
 
 /// Exchange deposit details.
-struct DepositDetails<T: Config<I>, I: Instance> {
+struct DepositDetails<T: Config<I>, I: 'static> {
 	/// Transfer id.
 	pub transfer_id: <T::PeerMaybeLockFundsTransaction as MaybeLockFundsTransaction>::Id,
 	/// Transfer recipient.
@@ -181,7 +199,7 @@ struct DepositDetails<T: Config<I>, I: Instance> {
 
 /// Verify and parse transaction proof, preparing everything required for importing
 /// this transaction proof.
-fn prepare_deposit_details<T: Config<I>, I: Instance>(
+fn prepare_deposit_details<T: Config<I>, I: 'static>(
 	proof: &<<T as Config<I>>::PeerBlockchain as InclusionProofVerifier>::TransactionInclusionProof,
 ) -> Result<DepositDetails<T, I>, Error<T, I>> {
 	// ensure that transaction is included in finalized block that we know of
@@ -189,23 +207,16 @@ fn prepare_deposit_details<T: Config<I>, I: Instance>(
 		.ok_or(Error::<T, I>::UnfinalizedTransaction)?;
 
 	// parse transaction
-	let transaction =
-		<T as Config<I>>::PeerMaybeLockFundsTransaction::parse(&transaction).map_err(Error::<T, I>::from)?;
+	let transaction = <T as Config<I>>::PeerMaybeLockFundsTransaction::parse(&transaction)
+		.map_err(Error::<T, I>::from)?;
 	let transfer_id = transaction.id;
-	ensure!(
-		!Transfers::<T, I>::contains_key(&transfer_id),
-		Error::<T, I>::AlreadyClaimed
-	);
+	ensure!(!Transfers::<T, I>::contains_key(&transfer_id), Error::<T, I>::AlreadyClaimed);
 
 	// grant recipient
 	let recipient = T::RecipientsMap::map(transaction.recipient).map_err(Error::<T, I>::from)?;
 	let amount = T::CurrencyConverter::convert(transaction.amount).map_err(Error::<T, I>::from)?;
 
-	Ok(DepositDetails {
-		transfer_id,
-		recipient,
-		amount,
-	})
+	Ok(DepositDetails { transfer_id, recipient, amount })
 }
 
 #[cfg(test)]
@@ -215,7 +226,9 @@ mod tests {
 
 	use super::*;
 	use bp_currency_exchange::LockFundsTransaction;
-	use frame_support::{assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight};
+	use frame_support::{
+		assert_noop, assert_ok, construct_runtime, parameter_types, weights::Weight,
+	};
 	use sp_core::H256;
 	use sp_runtime::{
 		testing::Header,
@@ -238,7 +251,7 @@ mod tests {
 
 	impl OnTransactionSubmitted<AccountId> for DummyTransactionSubmissionHandler {
 		fn on_valid_transaction_submitted(submitter: AccountId) {
-			Transfers::<TestRuntime, DefaultInstance>::insert(submitter, ());
+			Transfers::<TestRuntime, ()>::insert(submitter, ());
 		}
 	}
 
@@ -248,7 +261,9 @@ mod tests {
 		type Transaction = RawTransaction;
 		type TransactionInclusionProof = (bool, RawTransaction);
 
-		fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<RawTransaction> {
+		fn verify_transaction_inclusion_proof(
+			proof: &Self::TransactionInclusionProof,
+		) -> Option<RawTransaction> {
 			if proof.0 {
 				Some(proof.1.clone())
 			} else {
@@ -279,7 +294,9 @@ mod tests {
 		type PeerRecipient = AccountId;
 		type Recipient = AccountId;
 
-		fn map(peer_recipient: Self::PeerRecipient) -> bp_currency_exchange::Result<Self::Recipient> {
+		fn map(
+			peer_recipient: Self::PeerRecipient,
+		) -> bp_currency_exchange::Result<Self::Recipient> {
 			match peer_recipient {
 				UNKNOWN_RECIPIENT_ID => Err(ExchangeError::FailedToMapRecipients),
 				_ => Ok(peer_recipient * 10),
@@ -307,10 +324,14 @@ mod tests {
 		type Recipient = AccountId;
 		type Amount = u64;
 
-		fn deposit_into(_recipient: Self::Recipient, amount: Self::Amount) -> bp_currency_exchange::Result<()> {
+		fn deposit_into(
+			_recipient: Self::Recipient,
+			amount: Self::Amount,
+		) -> bp_currency_exchange::Result<()> {
 			match amount {
 				amount if amount < MAX_DEPOSIT_AMOUNT * 10 => Ok(()),
-				amount if amount == MAX_DEPOSIT_AMOUNT * 10 => Err(ExchangeError::DepositPartiallyFailed),
+				amount if amount == MAX_DEPOSIT_AMOUNT * 10 =>
+					Err(ExchangeError::DepositPartiallyFailed),
 				_ => Err(ExchangeError::DepositFailed),
 			}
 		}
@@ -375,26 +396,23 @@ mod tests {
 	}
 
 	fn new_test_ext() -> sp_io::TestExternalities {
-		let t = frame_system::GenesisConfig::default()
-			.build_storage::<TestRuntime>()
-			.unwrap();
+		let t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 		sp_io::TestExternalities::new(t)
 	}
 
 	fn transaction(id: u64) -> RawTransaction {
-		RawTransaction {
-			id,
-			recipient: 1,
-			amount: 2,
-		}
+		RawTransaction { id, recipient: 1, amount: 2 }
 	}
 
 	#[test]
 	fn unfinalized_transaction_rejected() {
 		new_test_ext().execute_with(|| {
 			assert_noop!(
-				Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (false, transaction(0))),
-				Error::<TestRuntime, DefaultInstance>::UnfinalizedTransaction,
+				Exchange::import_peer_transaction(
+					Origin::signed(SUBMITTER),
+					(false, transaction(0))
+				),
+				Error::<TestRuntime, ()>::UnfinalizedTransaction,
 			);
 		});
 	}
@@ -407,7 +425,7 @@ mod tests {
 					Origin::signed(SUBMITTER),
 					(true, transaction(INVALID_TRANSACTION_ID)),
 				),
-				Error::<TestRuntime, DefaultInstance>::InvalidTransaction,
+				Error::<TestRuntime, ()>::InvalidTransaction,
 			);
 		});
 	}
@@ -421,7 +439,7 @@ mod tests {
 					Origin::signed(SUBMITTER),
 					(true, transaction(ALREADY_CLAIMED_TRANSACTION_ID)),
 				),
-				Error::<TestRuntime, DefaultInstance>::AlreadyClaimed,
+				Error::<TestRuntime, ()>::AlreadyClaimed,
 			);
 		});
 	}
@@ -433,7 +451,7 @@ mod tests {
 			transaction.recipient = UNKNOWN_RECIPIENT_ID;
 			assert_noop!(
 				Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)),
-				Error::<TestRuntime, DefaultInstance>::FailedToMapRecipients,
+				Error::<TestRuntime, ()>::FailedToMapRecipients,
 			);
 		});
 	}
@@ -445,7 +463,7 @@ mod tests {
 			transaction.amount = INVALID_AMOUNT;
 			assert_noop!(
 				Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)),
-				Error::<TestRuntime, DefaultInstance>::FailedToConvertCurrency,
+				Error::<TestRuntime, ()>::FailedToConvertCurrency,
 			);
 		});
 	}
@@ -457,7 +475,7 @@ mod tests {
 			transaction.amount = MAX_DEPOSIT_AMOUNT + 1;
 			assert_noop!(
 				Exchange::import_peer_transaction(Origin::signed(SUBMITTER), (true, transaction)),
-				Error::<TestRuntime, DefaultInstance>::DepositFailed,
+				Error::<TestRuntime, ()>::DepositFailed,
 			);
 		});
 	}
diff --git a/polkadot/bridges/modules/dispatch/Cargo.toml b/polkadot/bridges/modules/dispatch/Cargo.toml
index fb601a70a1e2e8737c2ab91b1b48546243901c2f..1d91d0a0509420ecb0fea19cbe4c0e84003101c5 100644
--- a/polkadot/bridges/modules/dispatch/Cargo.toml
+++ b/polkadot/bridges/modules/dispatch/Cargo.toml
@@ -7,9 +7,9 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
-scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Bridge dependencies
 
@@ -18,15 +18,14 @@ bp-runtime = { path = "../../primitives/runtime", default-features = false }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
 sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" }
-serde = "1.0"
 
 [features]
 default = ["std"]
diff --git a/polkadot/bridges/modules/dispatch/src/lib.rs b/polkadot/bridges/modules/dispatch/src/lib.rs
index 00858272365a2d77314fab2e29f452409f3aabfa..f467bab0d9464829698fc54cbd7ff9db1044556f 100644
--- a/polkadot/bridges/modules/dispatch/src/lib.rs
+++ b/polkadot/bridges/modules/dispatch/src/lib.rs
@@ -22,123 +22,139 @@
 //! a successful dispatch an event is emitted.
 
 #![cfg_attr(not(feature = "std"), no_std)]
-#![warn(missing_docs)]
 // Generated by `decl_event!`
 #![allow(clippy::unused_unit)]
 
-use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion, Weight};
+use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion};
 use bp_runtime::{
 	derive_account_id,
 	messages::{DispatchFeePayment, MessageDispatchResult},
 	ChainId, SourceAccount,
 };
-use codec::{Decode, Encode};
+use codec::Encode;
 use frame_support::{
-	decl_event, decl_module, decl_storage,
-	dispatch::{Dispatchable, Parameter},
+	dispatch::Dispatchable,
 	ensure,
 	traits::{Contains, Get},
 	weights::{extract_actual_weight, GetDispatchInfo},
 };
 use frame_system::RawOrigin;
-use sp_runtime::{
-	traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, MaybeSerializeDeserialize, Member, Verify},
-	DispatchResult,
-};
-use sp_std::{fmt::Debug, marker::PhantomData, prelude::*};
-
-/// The module configuration trait.
-pub trait Config<I = DefaultInstance>: frame_system::Config {
-	/// The overarching event type.
-	type Event: From<Event<Self, I>> + Into<<Self as frame_system::Config>::Event>;
-	/// Id of the message. Whenever message is passed to the dispatch module, it emits
-	/// event with this id + dispatch result. Could be e.g. (`LaneId`, `MessageNonce`) if
-	/// it comes from the messages module.
-	type MessageId: Parameter;
-	/// Type of account ID on source chain.
-	type SourceChainAccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default;
-	/// Type of account public key on target chain.
-	type TargetChainAccountPublic: Parameter + IdentifyAccount<AccountId = Self::AccountId>;
-	/// Type of signature that may prove that the message has been signed by
-	/// owner of `TargetChainAccountPublic`.
-	type TargetChainSignature: Parameter + Verify<Signer = Self::TargetChainAccountPublic>;
-	/// The overarching dispatch call type.
-	type Call: Parameter
-		+ GetDispatchInfo
-		+ Dispatchable<
-			Origin = <Self as frame_system::Config>::Origin,
-			PostInfo = frame_support::dispatch::PostDispatchInfo,
-		>;
-	/// Pre-dispatch filter for incoming calls.
-	///
-	/// The pallet will filter all incoming calls right before they're dispatched. If this filter
-	/// rejects the call, special event (`Event::MessageCallRejected`) is emitted.
-	type CallFilter: Contains<<Self as Config<I>>::Call>;
-	/// The type that is used to wrap the `Self::Call` when it is moved over bridge.
-	///
-	/// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure
-	/// that all other stuff (like `spec_version`) is OK. If we would try to decode
-	/// `Call` which has been encoded using previous `spec_version`, then we might end
-	/// up with decoding error, instead of `MessageVersionSpecMismatch`.
-	type EncodedCall: Decode + Encode + Into<Result<<Self as Config<I>>::Call, ()>>;
-	/// A type which can be turned into an `AccountId` from a 256-bit hash.
-	///
-	/// Used when deriving target chain `AccountId`s from source chain `AccountId`s.
-	type AccountIdConverter: sp_runtime::traits::Convert<sp_core::hash::H256, Self::AccountId>;
-}
+use sp_runtime::traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, Verify};
+use sp_std::{fmt::Debug, prelude::*};
 
-decl_storage! {
-	trait Store for Pallet<T: Config<I>, I: Instance = DefaultInstance> as Dispatch {}
-}
+pub use pallet::*;
+
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// The overarching event type.
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+		/// Id of the message. Whenever message is passed to the dispatch module, it emits
+		/// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if
+		/// it comes from the messages module.
+		type BridgeMessageId: Parameter;
+		/// Type of account ID on source chain.
+		type SourceChainAccountId: Parameter
+			+ Member
+			+ MaybeSerializeDeserialize
+			+ Debug
+			+ MaybeDisplay
+			+ Ord
+			+ Default;
+		/// Type of account public key on target chain.
+		type TargetChainAccountPublic: Parameter + IdentifyAccount<AccountId = Self::AccountId>;
+		/// Type of signature that may prove that the message has been signed by
+		/// owner of `TargetChainAccountPublic`.
+		type TargetChainSignature: Parameter + Verify<Signer = Self::TargetChainAccountPublic>;
+		/// The overarching dispatch call type.
+		type Call: Parameter
+			+ GetDispatchInfo
+			+ Dispatchable<
+				Origin = <Self as frame_system::Config>::Origin,
+				PostInfo = frame_support::dispatch::PostDispatchInfo,
+			>;
+		/// Pre-dispatch filter for incoming calls.
+		///
+		/// The pallet will filter all incoming calls right before they're dispatched. If this
+		/// filter rejects the call, special event (`Event::MessageCallRejected`) is emitted.
+		type CallFilter: Contains<<Self as Config<I>>::Call>;
+		/// The type that is used to wrap the `Self::Call` when it is moved over bridge.
+		///
+		/// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure
+		/// that all other stuff (like `spec_version`) is ok. If we would try to decode
+		/// `Call` which has been encoded using previous `spec_version`, then we might end
+		/// up with decoding error, instead of `MessageVersionSpecMismatch`.
+		type EncodedCall: Decode + Encode + Into<Result<<Self as Config<I>>::Call, ()>>;
+		/// A type which can be turned into an AccountId from a 256-bit hash.
+		///
+		/// Used when deriving target chain AccountIds from source chain AccountIds.
+		type AccountIdConverter: sp_runtime::traits::Convert<sp_core::hash::H256, Self::AccountId>;
+	}
+
+	type BridgeMessageIdOf<T, I> = <T as Config<I>>::BridgeMessageId;
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {}
 
-decl_event!(
-	pub enum Event<T, I = DefaultInstance> where
-		<T as Config<I>>::MessageId,
-		AccountId = <T as frame_system::Config>::AccountId,
-	{
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {}
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
 		/// Message has been rejected before reaching dispatch.
-		MessageRejected(ChainId, MessageId),
+		MessageRejected(ChainId, BridgeMessageIdOf<T, I>),
 		/// Message has been rejected by dispatcher because of spec version mismatch.
 		/// Last two arguments are: expected and passed spec version.
-		MessageVersionSpecMismatch(ChainId, MessageId, SpecVersion, SpecVersion),
+		MessageVersionSpecMismatch(ChainId, BridgeMessageIdOf<T, I>, SpecVersion, SpecVersion),
 		/// Message has been rejected by dispatcher because of weight mismatch.
 		/// Last two arguments are: expected and passed call weight.
-		MessageWeightMismatch(ChainId, MessageId, Weight, Weight),
+		MessageWeightMismatch(ChainId, BridgeMessageIdOf<T, I>, Weight, Weight),
 		/// Message signature mismatch.
-		MessageSignatureMismatch(ChainId, MessageId),
+		MessageSignatureMismatch(ChainId, BridgeMessageIdOf<T, I>),
 		/// We have failed to decode Call from the message.
-		MessageCallDecodeFailed(ChainId, MessageId),
+		MessageCallDecodeFailed(ChainId, BridgeMessageIdOf<T, I>),
 		/// The call from the message has been rejected by the call filter.
-		MessageCallRejected(ChainId, MessageId),
+		MessageCallRejected(ChainId, BridgeMessageIdOf<T, I>),
 		/// The origin account has failed to pay fee for dispatching the message.
-		MessageDispatchPaymentFailed(ChainId, MessageId, AccountId, Weight),
+		MessageDispatchPaymentFailed(
+			ChainId,
+			BridgeMessageIdOf<T, I>,
+			<T as frame_system::Config>::AccountId,
+			Weight,
+		),
 		/// Message has been dispatched with given result.
-		MessageDispatched(ChainId, MessageId, DispatchResult),
+		MessageDispatched(ChainId, BridgeMessageIdOf<T, I>, DispatchResult),
 		/// Phantom member, never used. Needed to handle multiple pallet instances.
 		_Dummy(PhantomData<I>),
 	}
-);
-
-decl_module! {
-	/// Call Dispatch FRAME Pallet.
-	pub struct Module<T: Config<I>, I: Instance = DefaultInstance> for enum Call where origin: T::Origin {
-		/// Deposit one of this module's events by using the default implementation.
-		fn deposit_event() = default;
-	}
 }
 
-impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for Pallet<T, I> {
-	type Message =
-		MessagePayload<T::SourceChainAccountId, T::TargetChainAccountPublic, T::TargetChainSignature, T::EncodedCall>;
+impl<T: Config<I>, I: 'static> MessageDispatch<T::AccountId, T::BridgeMessageId> for Pallet<T, I> {
+	type Message = MessagePayload<
+		T::SourceChainAccountId,
+		T::TargetChainAccountPublic,
+		T::TargetChainSignature,
+		T::EncodedCall,
+	>;
 
-	fn dispatch_weight(message: &Self::Message) -> Weight {
+	fn dispatch_weight(message: &Self::Message) -> bp_message_dispatch::Weight {
 		message.weight
 	}
 
-	fn dispatch<P: FnOnce(&T::AccountId, Weight) -> Result<(), ()>>(
+	fn dispatch<P: FnOnce(&T::AccountId, bp_message_dispatch::Weight) -> Result<(), ()>>(
 		source_chain: ChainId,
 		target_chain: ChainId,
-		id: T::MessageId,
+		id: T::BridgeMessageId,
 		message: Result<Self::Message, ()>,
 		pay_dispatch_fee: P,
 	) -> MessageDispatchResult {
@@ -152,13 +168,13 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 					source_chain,
 					id,
 				);
-				Self::deposit_event(RawEvent::MessageRejected(source_chain, id));
+				Self::deposit_event(Event::MessageRejected(source_chain, id));
 				return MessageDispatchResult {
 					dispatch_result: false,
 					unspent_weight: 0,
 					dispatch_fee_paid_during_dispatch: false,
-				};
-			}
+				}
+			},
 		};
 
 		// verify spec version
@@ -177,13 +193,13 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 				expected_version,
 				message.spec_version,
 			);
-			Self::deposit_event(RawEvent::MessageVersionSpecMismatch(
+			Self::deposit_event(Event::MessageVersionSpecMismatch(
 				source_chain,
 				id,
 				expected_version,
 				message.spec_version,
 			));
-			return dispatch_result;
+			return dispatch_result
 		}
 
 		// now that we have spec version checked, let's decode the call
@@ -196,19 +212,20 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 					source_chain,
 					id,
 				);
-				Self::deposit_event(RawEvent::MessageCallDecodeFailed(source_chain, id));
-				return dispatch_result;
-			}
+				Self::deposit_event(Event::MessageCallDecodeFailed(source_chain, id));
+				return dispatch_result
+			},
 		};
 
 		// prepare dispatch origin
 		let origin_account = match message.origin {
 			CallOrigin::SourceRoot => {
-				let hex_id = derive_account_id::<T::SourceChainAccountId>(source_chain, SourceAccount::Root);
+				let hex_id =
+					derive_account_id::<T::SourceChainAccountId>(source_chain, SourceAccount::Root);
 				let target_id = T::AccountIdConverter::convert(hex_id);
 				log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id);
 				target_id
-			}
+			},
 			CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => {
 				let digest = account_ownership_digest(
 					&call,
@@ -228,19 +245,20 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 						target_account,
 						target_signature,
 					);
-					Self::deposit_event(RawEvent::MessageSignatureMismatch(source_chain, id));
-					return dispatch_result;
+					Self::deposit_event(Event::MessageSignatureMismatch(source_chain, id));
+					return dispatch_result
 				}
 
 				log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account);
 				target_account
-			}
+			},
 			CallOrigin::SourceAccount(source_account_id) => {
-				let hex_id = derive_account_id(source_chain, SourceAccount::Account(source_account_id));
+				let hex_id =
+					derive_account_id(source_chain, SourceAccount::Account(source_account_id));
 				let target_id = T::AccountIdConverter::convert(hex_id);
 				log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id);
 				target_id
-			}
+			},
 		};
 
 		// filter the call
@@ -252,8 +270,8 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 				id,
 				call,
 			);
-			Self::deposit_event(RawEvent::MessageCallRejected(source_chain, id));
-			return dispatch_result;
+			Self::deposit_event(Event::MessageCallRejected(source_chain, id));
+			return dispatch_result
 		}
 
 		// verify weight
@@ -270,18 +288,21 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 				expected_weight,
 				message.weight,
 			);
-			Self::deposit_event(RawEvent::MessageWeightMismatch(
+			Self::deposit_event(Event::MessageWeightMismatch(
 				source_chain,
 				id,
 				expected_weight,
 				message.weight,
 			));
-			return dispatch_result;
+			return dispatch_result
 		}
 
 		// pay dispatch fee right before dispatch
-		let pay_dispatch_fee_at_target_chain = message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
-		if pay_dispatch_fee_at_target_chain && pay_dispatch_fee(&origin_account, message.weight).is_err() {
+		let pay_dispatch_fee_at_target_chain =
+			message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain;
+		if pay_dispatch_fee_at_target_chain &&
+			pay_dispatch_fee(&origin_account, message.weight).is_err()
+		{
 			log::trace!(
 				target: "runtime::bridge-dispatch",
 				"Failed to pay dispatch fee for dispatching message {:?}/{:?} with weight {}",
@@ -289,13 +310,13 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 				id,
 				message.weight,
 			);
-			Self::deposit_event(RawEvent::MessageDispatchPaymentFailed(
+			Self::deposit_event(Event::MessageDispatchPaymentFailed(
 				source_chain,
 				id,
 				origin_account,
 				message.weight,
 			));
-			return dispatch_result;
+			return dispatch_result
 		}
 		dispatch_result.dispatch_fee_paid_during_dispatch = pay_dispatch_fee_at_target_chain;
 
@@ -313,13 +334,13 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 			"Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}. Call dispatch result: {:?}",
 			source_chain,
 			id,
-			dispatch_result.unspent_weight,
+			actual_call_weight,
 			message.weight,
 			dispatch_result,
 			result,
 		);
 
-		Self::deposit_event(RawEvent::MessageDispatched(
+		Self::deposit_event(Event::MessageDispatched(
 			source_chain,
 			id,
 			result.map(drop).map_err(|e| e.error),
@@ -335,9 +356,19 @@ impl<T: Config<I>, I: Instance> MessageDispatch<T::AccountId, T::MessageId> for
 /// For example, if a message is sent from a "regular" account on the source chain it will not be
 /// allowed to be dispatched as Root on the target chain. This is a useful check to do on the source
 /// chain _before_ sending a message whose dispatch will be rejected on the target chain.
-pub fn verify_message_origin<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature, Call>(
+pub fn verify_message_origin<
+	SourceChainAccountId,
+	TargetChainAccountPublic,
+	TargetChainSignature,
+	Call,
+>(
 	sender_origin: &RawOrigin<SourceChainAccountId>,
-	message: &MessagePayload<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature, Call>,
+	message: &MessagePayload<
+		SourceChainAccountId,
+		TargetChainAccountPublic,
+		TargetChainSignature,
+		Call,
+	>,
 ) -> Result<Option<SourceChainAccountId>, BadOrigin>
 where
 	SourceChainAccountId: PartialEq + Clone,
@@ -346,21 +377,19 @@ where
 		CallOrigin::SourceRoot => {
 			ensure!(sender_origin == &RawOrigin::Root, BadOrigin);
 			Ok(None)
-		}
+		},
 		CallOrigin::TargetAccount(ref source_account_id, _, _) => {
-			ensure!(
-				sender_origin == &RawOrigin::Signed(source_account_id.clone()),
-				BadOrigin
-			);
+			ensure!(sender_origin == &RawOrigin::Signed(source_account_id.clone()), BadOrigin);
 			Ok(Some(source_account_id.clone()))
-		}
+		},
 		CallOrigin::SourceAccount(ref source_account_id) => {
 			ensure!(
-				sender_origin == &RawOrigin::Signed(source_account_id.clone()) || sender_origin == &RawOrigin::Root,
+				sender_origin == &RawOrigin::Signed(source_account_id.clone()) ||
+					sender_origin == &RawOrigin::Root,
 				BadOrigin
 			);
 			Ok(Some(source_account_id.clone()))
-		}
+		},
 	}
 }
 
@@ -397,7 +426,8 @@ mod tests {
 	#![allow(clippy::from_over_into)]
 
 	use super::*;
-	use frame_support::{dispatch::GetDispatchInfo, parameter_types, weights::Weight};
+	use codec::Decode;
+	use frame_support::{parameter_types, weights::Weight};
 	use frame_system::{EventRecord, Phase};
 	use scale_info::TypeInfo;
 	use sp_core::H256;
@@ -408,7 +438,7 @@ mod tests {
 	};
 
 	type AccountId = u64;
-	type MessageId = [u8; 4];
+	type BridgeMessageId = [u8; 4];
 
 	const SOURCE_CHAIN_ID: ChainId = *b"srce";
 	const TARGET_CHAIN_ID: ChainId = *b"trgt";
@@ -494,7 +524,7 @@ mod tests {
 
 	impl Config for TestRuntime {
 		type Event = Event;
-		type MessageId = MessageId;
+		type BridgeMessageId = BridgeMessageId;
 		type SourceChainAccountId = AccountId;
 		type TargetChainAccountPublic = TestAccountPublic;
 		type TargetChainSignature = TestSignature;
@@ -525,16 +555,17 @@ mod tests {
 	const TEST_WEIGHT: Weight = 1_000_000_000;
 
 	fn new_test_ext() -> sp_io::TestExternalities {
-		let t = frame_system::GenesisConfig::default()
-			.build_storage::<TestRuntime>()
-			.unwrap();
+		let t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 		sp_io::TestExternalities::new(t)
 	}
 
 	fn prepare_message(
 		origin: CallOrigin<AccountId, TestAccountPublic, TestSignature>,
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		MessagePayload {
 			spec_version: TEST_SPEC_VERSION,
 			weight: TEST_WEIGHT,
@@ -546,20 +577,29 @@ mod tests {
 
 	fn prepare_root_message(
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		prepare_message(CallOrigin::SourceRoot, call)
 	}
 
 	fn prepare_target_message(
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1));
 		prepare_message(origin, call)
 	}
 
 	fn prepare_source_message(
 		call: Call,
-	) -> <Pallet<TestRuntime> as MessageDispatch<AccountId, <TestRuntime as Config>::MessageId>>::Message {
+	) -> <Pallet<TestRuntime> as MessageDispatch<
+		AccountId,
+		<TestRuntime as Config>::BridgeMessageId,
+	>>::Message {
 		let origin = CallOrigin::SourceAccount(1);
 		prepare_message(origin, call)
 	}
@@ -570,14 +610,20 @@ mod tests {
 			let id = [0; 4];
 
 			const BAD_SPEC_VERSION: SpecVersion = 99;
-			let mut message = prepare_root_message(Call::System(frame_system::Call::<TestRuntime>::remark {
+			let mut message = prepare_root_message(Call::System(frame_system::Call::remark {
 				remark: vec![1, 2, 3],
 			}));
 			let weight = message.weight;
 			message.spec_version = BAD_SPEC_VERSION;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -585,12 +631,14 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageVersionSpecMismatch(
-						SOURCE_CHAIN_ID,
-						id,
-						TEST_SPEC_VERSION,
-						BAD_SPEC_VERSION
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageVersionSpecMismatch(
+							SOURCE_CHAIN_ID,
+							id,
+							TEST_SPEC_VERSION,
+							BAD_SPEC_VERSION
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -601,17 +649,20 @@ mod tests {
 	fn should_fail_on_weight_mismatch() {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
-			let call = Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![1, 2, 3] });
+			let call = Call::System(frame_system::Call::remark { remark: vec![1, 2, 3] });
 			let call_weight = call.get_dispatch_info().weight;
 			let mut message = prepare_root_message(call);
 			message.weight = 7;
-			assert!(
-				call_weight != 7,
-				"needed for test to actually trigger a weight mismatch"
-			);
+			assert!(call_weight != 7, "needed for test to actually trigger a weight mismatch");
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, 7);
 			assert!(!result.dispatch_result);
 
@@ -619,12 +670,14 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageWeightMismatch(
-						SOURCE_CHAIN_ID,
-						id,
-						call_weight,
-						7,
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageWeightMismatch(
+							SOURCE_CHAIN_ID,
+							id,
+							call_weight,
+							7,
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -639,12 +692,18 @@ mod tests {
 			let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99));
 			let message = prepare_message(
 				call_origin,
-				Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![1, 2, 3] }),
+				Call::System(frame_system::Call::remark { remark: vec![1, 2, 3] }),
 			);
 			let weight = message.weight;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -652,10 +711,12 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageSignatureMismatch(
-						SOURCE_CHAIN_ID,
-						id
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageSignatureMismatch(
+							SOURCE_CHAIN_ID,
+							id
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -668,7 +729,13 @@ mod tests {
 			let id = [0; 4];
 
 			System::set_block_number(1);
-			Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Err(()), |_, _| unreachable!());
+			Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Err(()),
+				|_, _| unreachable!(),
+			);
 
 			assert_eq!(
 				System::events(),
@@ -689,14 +756,20 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let mut message = prepare_root_message(Call::System(frame_system::Call::<TestRuntime>::remark {
+			let mut message = prepare_root_message(Call::System(frame_system::Call::remark {
 				remark: vec![1, 2, 3],
 			}));
 			let weight = message.weight;
 			message.call.0 = vec![];
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -704,10 +777,12 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallDecodeFailed(
-						SOURCE_CHAIN_ID,
-						id
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageCallDecodeFailed(
+							SOURCE_CHAIN_ID,
+							id
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -719,15 +794,20 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let call = Call::System(frame_system::Call::<TestRuntime>::fill_block {
-				ratio: Perbill::from_percent(75),
-			});
+			let call =
+				Call::System(frame_system::Call::fill_block { ratio: Perbill::from_percent(75) });
 			let weight = call.get_dispatch_info().weight;
 			let mut message = prepare_root_message(call);
 			message.weight = weight;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -735,10 +815,12 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageCallRejected(
-						SOURCE_CHAIN_ID,
-						id
-					)),
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageCallRejected(
+							SOURCE_CHAIN_ID,
+							id
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -750,14 +832,17 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let mut message = prepare_root_message(Call::System(frame_system::Call::<TestRuntime>::remark {
+			let mut message = prepare_root_message(Call::System(frame_system::Call::remark {
 				remark: vec![1, 2, 3],
 			}));
 			let weight = message.weight;
 			message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Err(()));
+			let result =
+				Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| {
+					Err(())
+				});
 			assert_eq!(result.unspent_weight, weight);
 			assert!(!result.dispatch_result);
 
@@ -765,15 +850,17 @@ mod tests {
 				System::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: Event::Dispatch(call_dispatch::Event::<TestRuntime>::MessageDispatchPaymentFailed(
-						SOURCE_CHAIN_ID,
-						id,
-						AccountIdConverter::convert(derive_account_id::<AccountId>(
+					event: Event::Dispatch(
+						call_dispatch::Event::<TestRuntime>::MessageDispatchPaymentFailed(
 							SOURCE_CHAIN_ID,
-							SourceAccount::Root
-						)),
-						TEST_WEIGHT,
-					)),
+							id,
+							AccountIdConverter::convert(derive_account_id::<AccountId>(
+								SOURCE_CHAIN_ID,
+								SourceAccount::Root
+							)),
+							TEST_WEIGHT,
+						)
+					),
 					topics: vec![],
 				}],
 			);
@@ -785,13 +872,19 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let mut message = prepare_root_message(Call::System(frame_system::Call::<TestRuntime>::remark {
+			let mut message = prepare_root_message(Call::System(frame_system::Call::remark {
 				remark: vec![1, 2, 3],
 			}));
 			message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain;
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| Ok(()));
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| Ok(()),
+			);
 			assert!(result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -815,11 +908,17 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let call = Call::System(frame_system::Call::<TestRuntime>::set_heap_pages { pages: 1 });
+			let call = Call::System(frame_system::Call::set_heap_pages { pages: 1 });
 			let message = prepare_target_message(call);
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(!result.dispatch_result);
 
@@ -842,12 +941,18 @@ mod tests {
 	fn should_dispatch_bridge_message_from_root_origin() {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
-			let message = prepare_root_message(Call::System(frame_system::Call::<TestRuntime>::remark {
+			let message = prepare_root_message(Call::System(frame_system::Call::remark {
 				remark: vec![1, 2, 3],
 			}));
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -871,11 +976,17 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let call = Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![] });
+			let call = Call::System(frame_system::Call::remark { remark: vec![] });
 			let message = prepare_target_message(call);
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -899,11 +1010,17 @@ mod tests {
 		new_test_ext().execute_with(|| {
 			let id = [0; 4];
 
-			let call = Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![] });
+			let call = Call::System(frame_system::Call::remark { remark: vec![] });
 			let message = prepare_source_message(call);
 
 			System::set_block_number(1);
-			let result = Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| unreachable!());
+			let result = Dispatch::dispatch(
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+				id,
+				Ok(message),
+				|_, _| unreachable!(),
+			);
 			assert!(!result.dispatch_fee_paid_during_dispatch);
 			assert!(result.dispatch_result);
 
@@ -924,60 +1041,42 @@ mod tests {
 
 	#[test]
 	fn origin_is_checked_when_verifying_sending_message_using_source_root_account() {
-		let call = Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![] });
+		let call = Call::System(frame_system::Call::remark { remark: vec![] });
 		let message = prepare_root_message(call);
 
 		// When message is sent by Root, CallOrigin::SourceRoot is allowed
 		assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(None)));
 
 		// when message is sent by some real account, CallOrigin::SourceRoot is not allowed
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(1), &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Err(BadOrigin)));
 	}
 
 	#[test]
 	fn origin_is_checked_when_verifying_sending_message_using_target_account() {
-		let call = Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![] });
+		let call = Call::System(frame_system::Call::remark { remark: vec![] });
 		let message = prepare_target_message(call);
 
 		// When message is sent by Root, CallOrigin::TargetAccount is not allowed
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Root, &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Err(BadOrigin)));
 
 		// When message is sent by some other account, it is rejected
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(2), &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin)));
 
 		// When message is sent by a real account, it is allowed to have origin
 		// CallOrigin::TargetAccount
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(1), &message),
-			Ok(Some(1))
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1))));
 	}
 
 	#[test]
 	fn origin_is_checked_when_verifying_sending_message_using_source_account() {
-		let call = Call::System(frame_system::Call::<TestRuntime>::remark { remark: vec![] });
+		let call = Call::System(frame_system::Call::remark { remark: vec![] });
 		let message = prepare_source_message(call);
 
 		// Sending a message from the expected origin account works
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(1), &message),
-			Ok(Some(1))
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1))));
 
 		// If we send a message from a different account, it is rejected
-		assert!(matches!(
-			verify_message_origin(&RawOrigin::Signed(2), &message),
-			Err(BadOrigin)
-		));
+		assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin)));
 
 		// The Root account is allowed to assume any expected origin account
 		assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(Some(1))));
diff --git a/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml b/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml
index 6177ed3734ca823906ca5e6abf52038066eb74d1..ffb98bc6bd85ed470a578a7896de12138d5a7fe7 100644
--- a/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml
+++ b/polkadot/bridges/modules/ethereum-contract-builtin/Cargo.toml
@@ -7,9 +7,9 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-ethereum-types = "0.11.0"
-finality-grandpa = "0.14.4"
+codec = { package = "parity-scale-codec", version = "2.2.0" }
+ethereum-types = "0.12.0"
+finality-grandpa = "0.14.0"
 hex = "0.4"
 log = "0.4.14"
 
diff --git a/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs b/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs
index a07f838cf8d64d7a0fabe051c5593d75a502ca57..4a830f8e0a389f916455695e59d2ec9c44b3b081 100644
--- a/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs
+++ b/polkadot/bridges/modules/ethereum-contract-builtin/src/lib.rs
@@ -28,9 +28,9 @@ pub enum Error {
 	BlockNumberDecode,
 	/// Failed to decode Substrate header.
 	HeaderDecode(codec::Error),
-	/// Failed to decode best voters set.
+	/// Failed to decode the best voters set.
 	BestSetDecode(codec::Error),
-	/// Best voters set is invalid.
+	/// The best voters set is invalid.
 	InvalidBestSet,
 	/// Failed to decode finality proof.
 	FinalityProofDecode(codec::Error),
@@ -135,7 +135,9 @@ pub fn verify_substrate_finality_proof(
 ) -> Result<(), Error> {
 	let best_set = AuthorityList::decode(&mut &*raw_best_set)
 		.map_err(Error::BestSetDecode)
-		.and_then(|authorities| VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet));
+		.and_then(|authorities| {
+			VoterSet::new(authorities.into_iter()).ok_or(Error::InvalidBestSet)
+		});
 
 	log::debug!(
 		target: "bridge-builtin",
@@ -150,15 +152,16 @@ pub fn verify_substrate_finality_proof(
 
 	let best_set = best_set?;
 
-	let verify_result = sc_finality_grandpa::GrandpaJustification::<Block>::decode_and_verify_finalizes(
-		raw_finality_proof,
-		(finality_target_hash, finality_target_number),
-		best_set_id,
-		&best_set,
-	)
-	.map_err(Box::new)
-	.map_err(Error::JustificationVerify)
-	.map(|_| ());
+	let verify_result =
+		sc_finality_grandpa::GrandpaJustification::<Block>::decode_and_verify_finalizes(
+			raw_finality_proof,
+			(finality_target_hash, finality_target_number),
+			best_set_id,
+			&best_set,
+		)
+		.map_err(Box::new)
+		.map_err(Error::JustificationVerify)
+		.map(|_| ());
 
 	log::debug!(
 		target: "bridge-builtin",
@@ -202,10 +205,7 @@ mod tests {
 	#[test]
 	fn from_substrate_block_number_succeeds() {
 		assert_eq!(from_substrate_block_number(0).unwrap(), U256::zero());
-		assert_eq!(
-			from_substrate_block_number(std::u32::MAX).unwrap(),
-			U256::from(std::u32::MAX)
-		);
+		assert_eq!(from_substrate_block_number(std::u32::MAX).unwrap(), U256::from(std::u32::MAX));
 	}
 
 	#[test]
@@ -285,10 +285,7 @@ mod tests {
 					.parse()
 					.unwrap(),
 				number: 8,
-				signal: Some(ValidatorsSetSignal {
-					delay: 8,
-					validators: authorities.encode(),
-				}),
+				signal: Some(ValidatorsSetSignal { delay: 8, validators: authorities.encode() }),
 			},
 		);
 	}
@@ -296,13 +293,14 @@ mod tests {
 	/// Number of the example block with justification.
 	const EXAMPLE_JUSTIFIED_BLOCK_NUMBER: u32 = 8;
 	/// Hash of the example block with justification.
-	const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str = "a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775";
-	/// Id of authorities set that have generated example justification. Could be computed by tracking
-	/// every set change in canonized headers.
+	const EXAMPLE_JUSTIFIED_BLOCK_HASH: &str =
+		"a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f343775";
+	/// Id of the authorities set that has generated the example justification. Could be computed
+	/// by tracking every set change in canonized headers.
 	const EXAMPLE_AUTHORITIES_SET_ID: u64 = 0;
-	/// Encoded authorities set that has generated example justification. Could be fetched from `ScheduledChange`
-	/// digest of the block that has scheduled this set OR by calling `GrandpaApi::grandpa_authorities()` at
-	/// appropriate block.
+	/// Encoded authorities set that has generated the example justification. Could be fetched
+	/// from the `ScheduledChange` digest of the block that has scheduled this set OR by calling
+	/// `GrandpaApi::grandpa_authorities()` at the appropriate block.
 	const EXAMPLE_AUTHORITIES_SET: &str = "1488dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0ee0100000000000000d17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae690100000000000000439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234f01000000000000005e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d901000000000000001dfe3e22cc0d45c70779c1095f7489a8ef3cf52d62fbd8c2fa38c9f1723502b50100000000000000";
 	/// Example justification. Could be fetched by calling 'chain_getBlock' RPC.
 	const EXAMPLE_JUSTIFICATION: &str = "2600000000000000a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000010a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000d66b4ceb57ef8bcbc955071b597c8c5d2adcfdbb009c73f8438d342670fdeca9ac60686cbd58105b10f51d0a64a8e73b2e5829b2eab3248a008c472852130b00439660b36c6c03afafca027b910b4fecf99801834c62a5e6006f27d978de234fa2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000f5730c14d3cd22b7661e2f5fcb3139dd5fef37f946314a441d01b40ce1200ef70d810525f23fd278b588cd67473c200bda83c338c407b479386aa83798e5970b5e639b43e0052c47447dac87d6fd2b6ec50bdd4d0f614e4299c665249bbd09d9a2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f34377508000000c78d6ec463f476461a695b4791d30e7626d16fdf72d7c252c2cad387495a97e8c2827ed4d5af853d6e05d31cb6fb7438c9481a7e9c6990d60a9bfaf6a6e1930988dc3417d5058ec4b4503e0c12ea1a0a89be200fe98922423d4334014fa6b0eea2f45892db86b2ad133ce57d81b7e4375bb7035ce9883e6b68c358164f3437750800000052b4fc52d430286b3e2d650aa6e01b6ff4fae8b968893a62be789209eb97ee6e23780d3f5af7042d85bb48f1b202890b22724dfebce138826f66a5e00324320fd17c2d7823ebf260fd138f2d7e27d114c0145d968b5ff5006125f2414fadae6900";
diff --git a/polkadot/bridges/modules/ethereum/Cargo.toml b/polkadot/bridges/modules/ethereum/Cargo.toml
index 94e4087010d6e8fb1939b447ffc732f85edcc79d..a4dc60121173a51307f4c414e8bf6198eef8864a 100644
--- a/polkadot/bridges/modules/ethereum/Cargo.toml
+++ b/polkadot/bridges/modules/ethereum/Cargo.toml
@@ -7,9 +7,10 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
-libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"], optional = true }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
+libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"], optional = true }
 log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0", optional = true }
 
 # Bridge dependencies
@@ -18,15 +19,15 @@ bp-eth-poa = { path = "../../primitives/ethereum-poa", default-features = false
 
 # Substrate Dependencies
 
-frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
-libsecp256k1 = { version = "0.3.4", features = ["hmac"] }
+libsecp256k1 = { version = "0.7", features = ["hmac"] }
 hex-literal = "0.3"
 
 [features]
@@ -38,6 +39,7 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 	"log/std",
+	"scale-info/std",
 	"serde",
 	"sp-io/std",
 	"sp-runtime/std",
diff --git a/polkadot/bridges/modules/ethereum/src/benchmarking.rs b/polkadot/bridges/modules/ethereum/src/benchmarking.rs
index 960dbe9afec24ccafe56702f53ca3c8a0e6e97a6..511cbcac1ade93004b80f0969a29d7409d502c68 100644
--- a/polkadot/bridges/modules/ethereum/src/benchmarking.rs
+++ b/polkadot/bridges/modules/ethereum/src/benchmarking.rs
@@ -17,15 +17,15 @@
 use super::*;
 
 use crate::test_utils::{
-	build_custom_header, build_genesis_header, insert_header, validator_utils::*, validators_change_receipt,
-	HeaderBuilder,
+	build_custom_header, build_genesis_header, insert_header, validator_utils::*,
+	validators_change_receipt, HeaderBuilder,
 };
 
 use bp_eth_poa::{compute_merkle_root, U256};
-use frame_benchmarking::benchmarks_instance;
+use frame_benchmarking::benchmarks_instance_pallet;
 use frame_system::RawOrigin;
 
-benchmarks_instance! {
+benchmarks_instance_pallet! {
 	// Benchmark `import_unsigned_header` extrinsic with the best possible conditions:
 	// * Parent header is finalized.
 	// * New header doesn't require receipts.
@@ -46,7 +46,7 @@ benchmarks_instance! {
 				header
 			},
 		);
-	}: import_unsigned_header(RawOrigin::None, header, None)
+	}: import_unsigned_header(RawOrigin::None, Box::new(header), None)
 	verify {
 		let storage = BridgeStorage::<T, I>::new();
 		assert_eq!(storage.best_block().0.number, 1);
@@ -91,7 +91,7 @@ benchmarks_instance! {
 		// Need to make sure that the header we're going to import hasn't been inserted
 		// into storage already
 		let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority);
-	}: import_unsigned_header(RawOrigin::None, header, None)
+	}: import_unsigned_header(RawOrigin::None, Box::new(header), None)
 	verify {
 		let storage = BridgeStorage::<T, I>::new();
 		assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64);
@@ -132,7 +132,7 @@ benchmarks_instance! {
 		// Need to make sure that the header we're going to import hasn't been inserted
 		// into storage already
 		let header = HeaderBuilder::with_parent(&last_header).sign_by(&last_authority);
-	}: import_unsigned_header(RawOrigin::None, header, None)
+	}: import_unsigned_header(RawOrigin::None, Box::new(header), None)
 	verify {
 		let storage = BridgeStorage::<T, I>::new();
 		assert_eq!(storage.best_block().0.number, (num_blocks + 1) as u64);
@@ -153,7 +153,7 @@ benchmarks_instance! {
 		let validators = validators(num_validators);
 
 		// Want to prune eligible blocks between [0, n)
-		BlocksToPrune::<I>::put(PruningRange {
+		BlocksToPrune::<T, I>::put(PruningRange {
 			oldest_unpruned_block: 0,
 			oldest_block_to_keep: n as u64,
 		});
@@ -167,13 +167,13 @@ benchmarks_instance! {
 		}
 
 		let header = HeaderBuilder::with_parent(&parent).sign_by_set(&validators);
-	}: import_unsigned_header(RawOrigin::None, header, None)
+	}: import_unsigned_header(RawOrigin::None, Box::new(header), None)
 	verify {
 		let storage = BridgeStorage::<T, I>::new();
 		let max_pruned: u64 = (n - 1) as _;
 		assert_eq!(storage.best_block().0.number, (n + 1) as u64);
-		assert!(HeadersByNumber::<I>::get(&0).is_none());
-		assert!(HeadersByNumber::<I>::get(&max_pruned).is_none());
+		assert!(HeadersByNumber::<T, I>::get(&0).is_none());
+		assert!(HeadersByNumber::<T, I>::get(&max_pruned).is_none());
 	}
 
 	// The goal of this bench is to import a block which contains a transaction receipt. The receipt
@@ -209,14 +209,14 @@ benchmarks_instance! {
 				header
 			},
 		);
-	}: import_unsigned_header(RawOrigin::None, header, Some(receipts))
+	}: import_unsigned_header(RawOrigin::None, Box::new(header), Some(receipts))
 	verify {
 		let storage = BridgeStorage::<T, I>::new();
 		assert_eq!(storage.best_block().0.number, 2);
 	}
 }
 
-fn initialize_bench<T: Config<I>, I: Instance>(num_validators: usize) -> AuraHeader {
+fn initialize_bench<T: Config<I>, I: 'static>(num_validators: usize) -> AuraHeader {
 	// Initialize storage with some initial header
 	let initial_header = build_genesis_header(&validator(0));
 	let initial_difficulty = initial_header.difficulty;
diff --git a/polkadot/bridges/modules/ethereum/src/error.rs b/polkadot/bridges/modules/ethereum/src/error.rs
index ad798379da7dcc827fdb31bb8e6e4dae57ee7307..6fd376b01715f81c8e19e886844417b7683530ef 100644
--- a/polkadot/bridges/modules/ethereum/src/error.rs
+++ b/polkadot/bridges/modules/ethereum/src/error.rs
@@ -85,7 +85,8 @@ impl Error {
 			Error::InsufficientProof => "Header has insufficient proof",
 			Error::InvalidDifficulty => "Header has invalid difficulty",
 			Error::NotValidator => "Header is sealed by unexpected validator",
-			Error::MissingTransactionsReceipts => "The import operation requires transactions receipts",
+			Error::MissingTransactionsReceipts =>
+				"The import operation requires transactions receipts",
 			Error::RedundantTransactionsReceipts => "Redundant transactions receipts are provided",
 			Error::TransactionsReceiptsMismatch => "Invalid transactions receipts provided",
 			Error::UnsignedTooFarInTheFuture => "The unsigned header is too far in future",
diff --git a/polkadot/bridges/modules/ethereum/src/finality.rs b/polkadot/bridges/modules/ethereum/src/finality.rs
index 4ab276db777e12040f03f4cc8f5ba5edb1220945..cbfb4a04af86753e1a92ed7f5d4d91a3b8b148d4 100644
--- a/polkadot/bridges/modules/ethereum/src/finality.rs
+++ b/polkadot/bridges/modules/ethereum/src/finality.rs
@@ -14,18 +14,20 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::Storage;
+use crate::{error::Error, Storage};
 use bp_eth_poa::{public_to_address, Address, AuraHeader, HeaderId, SealedEmptyStep, H256};
 use codec::{Decode, Encode};
+use scale_info::TypeInfo;
 use sp_io::crypto::secp256k1_ecdsa_recover;
 use sp_runtime::RuntimeDebug;
-use sp_std::collections::{
-	btree_map::{BTreeMap, Entry},
-	btree_set::BTreeSet,
-	vec_deque::VecDeque,
+use sp_std::{
+	collections::{
+		btree_map::{BTreeMap, Entry},
+		btree_set::BTreeSet,
+		vec_deque::VecDeque,
+	},
+	prelude::*,
 };
-use sp_std::prelude::*;
 
 /// Cached finality votes for given block.
 #[derive(RuntimeDebug)]
@@ -36,7 +38,7 @@ pub struct CachedFinalityVotes<Submitter> {
 	/// best finalized.
 	pub stopped_at_finalized_sibling: bool,
 	/// Header ancestors that were read while we have been searching for
-	/// cached votes entry. Newest header has index 0.
+	/// cached votes entry. The newest header has index 0.
 	pub unaccounted_ancestry: VecDeque<(HeaderId, Option<Submitter>, AuraHeader)>,
 	/// Cached finality votes, if they have been found. The associated
 	/// header is not included into `unaccounted_ancestry`.
@@ -54,18 +56,18 @@ pub struct FinalityEffects<Submitter> {
 }
 
 /// Finality votes for given block.
-#[derive(RuntimeDebug, Decode, Encode)]
+#[derive(RuntimeDebug, Decode, Encode, TypeInfo)]
 #[cfg_attr(test, derive(Clone, PartialEq))]
 pub struct FinalityVotes<Submitter> {
 	/// Number of votes per each validator.
 	pub votes: BTreeMap<Address, u64>,
-	/// Ancestry blocks with oldest ancestors at the beginning and newest at the
+	/// Ancestry blocks with the oldest ancestors at the beginning and the newest at the
 	/// end of the queue.
 	pub ancestry: VecDeque<FinalityAncestor<Submitter>>,
 }
 
 /// Information about block ancestor that is used in computations.
-#[derive(RuntimeDebug, Decode, Encode)]
+#[derive(RuntimeDebug, Decode, Encode, TypeInfo)]
 #[cfg_attr(test, derive(Clone, Default, PartialEq))]
 pub struct FinalityAncestor<Submitter> {
 	/// Bock id.
@@ -116,17 +118,14 @@ pub fn finalize_blocks<S: Storage>(
 			&current_votes,
 			ancestor.id.number >= two_thirds_majority_transition,
 		) {
-			break;
+			break
 		}
 
 		remove_signers_votes(&ancestor.signers, &mut current_votes);
 		finalized_headers.push((ancestor.id, ancestor.submitter.clone()));
 	}
 
-	Ok(FinalityEffects {
-		finalized_headers,
-		votes,
-	})
+	Ok(FinalityEffects { finalized_headers, votes })
 }
 
 /// Returns true if there are enough votes to treat this header as finalized.
@@ -135,8 +134,8 @@ fn is_finalized(
 	votes: &BTreeMap<Address, u64>,
 	requires_two_thirds_majority: bool,
 ) -> bool {
-	(!requires_two_thirds_majority && votes.len() * 2 > validators.len())
-		|| (requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2)
+	(!requires_two_thirds_majority && votes.len() * 2 > validators.len()) ||
+		(requires_two_thirds_majority && votes.len() * 3 > validators.len() * 2)
 }
 
 /// Prepare 'votes' of header and its ancestors' signers.
@@ -151,12 +150,12 @@ pub(crate) fn prepare_votes<Submitter>(
 	// if we have reached finalized block sibling, then we're trying
 	// to switch finalized blocks
 	if cached_votes.stopped_at_finalized_sibling {
-		return Err(Error::TryingToFinalizeSibling);
+		return Err(Error::TryingToFinalizeSibling)
 	}
 
 	// this fn can only work with single validators set
 	if !validators.contains(&header.author) {
-		return Err(Error::NotValidator);
+		return Err(Error::NotValidator)
 	}
 
 	// now we have votes that were valid when some block B has been inserted
@@ -171,7 +170,7 @@ pub(crate) fn prepare_votes<Submitter>(
 	while let Some(old_ancestor) = votes.ancestry.pop_front() {
 		if old_ancestor.id.number > best_finalized.number {
 			votes.ancestry.push_front(old_ancestor);
-			break;
+			break
 		}
 
 		remove_signers_votes(&old_ancestor.signers, &mut votes.votes);
@@ -180,7 +179,9 @@ pub(crate) fn prepare_votes<Submitter>(
 	// add votes from new blocks
 	let mut parent_empty_step_signers = empty_steps_signers(header);
 	let mut unaccounted_ancestry = VecDeque::new();
-	while let Some((ancestor_id, ancestor_submitter, ancestor)) = cached_votes.unaccounted_ancestry.pop_front() {
+	while let Some((ancestor_id, ancestor_submitter, ancestor)) =
+		cached_votes.unaccounted_ancestry.pop_front()
+	{
 		let mut signers = empty_steps_signers(&ancestor);
 		sp_std::mem::swap(&mut signers, &mut parent_empty_step_signers);
 		signers.insert(ancestor.author);
@@ -199,11 +200,9 @@ pub(crate) fn prepare_votes<Submitter>(
 	let mut header_signers = BTreeSet::new();
 	header_signers.insert(header.author);
 	*votes.votes.entry(header.author).or_insert(0) += 1;
-	votes.ancestry.push_back(FinalityAncestor {
-		id,
-		submitter,
-		signers: header_signers,
-	});
+	votes
+		.ancestry
+		.push_back(FinalityAncestor { id, submitter, signers: header_signers });
 
 	Ok(votes)
 }
@@ -217,7 +216,7 @@ fn add_signers_votes(
 ) -> Result<(), Error> {
 	for signer in signers_to_add {
 		if !validators.contains(signer) {
-			return Err(Error::NotValidator);
+			return Err(Error::NotValidator)
 		}
 
 		*votes.entry(*signer).or_insert(0) += 1;
@@ -230,13 +229,12 @@ fn add_signers_votes(
 fn remove_signers_votes(signers_to_remove: &BTreeSet<Address>, votes: &mut BTreeMap<Address, u64>) {
 	for signer in signers_to_remove {
 		match votes.entry(*signer) {
-			Entry::Occupied(mut entry) => {
+			Entry::Occupied(mut entry) =>
 				if *entry.get() <= 1 {
 					entry.remove();
 				} else {
 					*entry.get_mut() -= 1;
-				}
-			}
+				},
 			Entry::Vacant(_) => unreachable!("we only remove signers that have been added; qed"),
 		}
 	}
@@ -272,19 +270,19 @@ impl<Submitter> Default for CachedFinalityVotes<Submitter> {
 
 impl<Submitter> Default for FinalityVotes<Submitter> {
 	fn default() -> Self {
-		FinalityVotes {
-			votes: BTreeMap::new(),
-			ancestry: VecDeque::new(),
-		}
+		FinalityVotes { votes: BTreeMap::new(), ancestry: VecDeque::new() }
 	}
 }
 
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime};
-	use crate::{BridgeStorage, FinalityCache, HeaderToImport};
-	use frame_support::StorageMap;
+	use crate::{
+		mock::{
+			insert_header, run_test, validator, validators_addresses, HeaderBuilder, TestRuntime,
+		},
+		BridgeStorage, FinalityCache, HeaderToImport,
+	};
 
 	const TOTAL_VALIDATORS: usize = 5;
 
@@ -342,7 +340,8 @@ mod tests {
 			storage.insert_header(header_to_import.clone());
 
 			// when header#2 is inserted, nothing is finalized (2 votes)
-			header_to_import.header = HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1));
+			header_to_import.header =
+				HeaderBuilder::with_parent_hash(id1.hash).sign_by(&validator(1));
 			header_to_import.id = header_to_import.header.compute_id();
 			let id2 = header_to_import.header.compute_id();
 			assert_eq!(
@@ -361,7 +360,8 @@ mod tests {
 			storage.insert_header(header_to_import.clone());
 
 			// when header#3 is inserted, header#1 is finalized (3 votes)
-			header_to_import.header = HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2));
+			header_to_import.header =
+				HeaderBuilder::with_parent_hash(id2.hash).sign_by(&validator(2));
 			header_to_import.id = header_to_import.header.compute_id();
 			let id3 = header_to_import.header.compute_id();
 			assert_eq!(
@@ -391,7 +391,9 @@ mod tests {
 		// 2) add votes from header#4 and header#5
 		let validators = validators_addresses(5);
 		let headers = (1..6)
-			.map(|number| HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1)))
+			.map(|number| {
+				HeaderBuilder::with_number(number).sign_by(&validator(number as usize - 1))
+			})
 			.collect::<Vec<_>>();
 		let ancestry = headers
 			.iter()
@@ -406,9 +408,10 @@ mod tests {
 			prepare_votes::<()>(
 				CachedFinalityVotes {
 					stopped_at_finalized_sibling: false,
-					unaccounted_ancestry: vec![(headers[3].compute_id(), None, headers[3].clone()),]
-						.into_iter()
-						.collect(),
+					unaccounted_ancestry:
+						vec![(headers[3].compute_id(), None, headers[3].clone()),]
+							.into_iter()
+							.collect(),
 					votes: Some(FinalityVotes {
 						votes: vec![(validators[0], 1), (validators[1], 1), (validators[2], 1),]
 							.into_iter()
@@ -446,7 +449,8 @@ mod tests {
 			let mut ancestry = Vec::new();
 			let mut parent_hash = ctx.genesis.compute_hash();
 			for i in 1..10 {
-				let header = HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3));
+				let header =
+					HeaderBuilder::with_parent_hash(parent_hash).sign_by(&validator((i - 1) / 3));
 				let id = header.compute_id();
 				insert_header(&mut storage, header.clone());
 				hashes.push(id.hash);
@@ -540,10 +544,7 @@ mod tests {
 	fn prepare_votes_fails_when_finalized_sibling_is_in_ancestry() {
 		assert_eq!(
 			prepare_votes::<()>(
-				CachedFinalityVotes {
-					stopped_at_finalized_sibling: true,
-					..Default::default()
-				},
+				CachedFinalityVotes { stopped_at_finalized_sibling: true, ..Default::default() },
 				Default::default(),
 				&validators_addresses(3).iter().collect(),
 				Default::default(),
diff --git a/polkadot/bridges/modules/ethereum/src/import.rs b/polkadot/bridges/modules/ethereum/src/import.rs
index a6945240cde4da2698de11a5d897b3a0d59a800d..d58b5c009a84d57dbf53154113119088419d38a1 100644
--- a/polkadot/bridges/modules/ethereum/src/import.rs
+++ b/polkadot/bridges/modules/ethereum/src/import.rs
@@ -14,22 +14,24 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::finality::finalize_blocks;
-use crate::validators::{Validators, ValidatorsConfiguration};
-use crate::verification::{is_importable_header, verify_aura_header};
-use crate::{AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage};
+use crate::{
+	error::Error,
+	finality::finalize_blocks,
+	validators::{Validators, ValidatorsConfiguration},
+	verification::{is_importable_header, verify_aura_header},
+	AuraConfiguration, ChainTime, ChangeToEnact, PruningStrategy, Storage,
+};
 use bp_eth_poa::{AuraHeader, HeaderId, Receipt};
 use sp_std::{collections::btree_map::BTreeMap, prelude::*};
 
-/// Imports bunch of headers and updates blocks finality.
+/// Imports a bunch of headers and updates blocks finality.
 ///
 /// Transactions receipts must be provided if `header_import_requires_receipts()`
 /// has returned true.
 /// If successful, returns tuple where first element is the number of useful headers
 /// we have imported and the second element is the number of useless headers (duplicate)
 /// we have NOT imported.
-/// Returns error if fatal error has occured during import. Some valid headers may be
+/// Returns error if fatal error has occurred during import. Some valid headers may be
 /// imported in this case.
 /// TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/415)
 #[allow(clippy::too_many_arguments)]
@@ -65,7 +67,7 @@ pub fn import_headers<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 					}
 				}
 				useful += 1;
-			}
+			},
 			Err(Error::AncientHeader) | Err(Error::KnownHeader) => useless += 1,
 			Err(error) => return Err(error),
 		}
@@ -103,7 +105,8 @@ pub fn import_header<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 
 	// check if block schedules new validators
 	let validators = Validators::new(validators_config);
-	let (scheduled_change, enacted_change) = validators.extract_validators_change(&header, receipts)?;
+	let (scheduled_change, enacted_change) =
+		validators.extract_validators_change(&header, receipts)?;
 
 	// check if block finalizes some other blocks and corresponding scheduled validators
 	let validators_set = import_context.validators_set();
@@ -117,11 +120,10 @@ pub fn import_header<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 		aura_config.two_thirds_majority_transition,
 	)?;
 	let enacted_change = enacted_change
-		.map(|validators| ChangeToEnact {
-			signal_block: None,
-			validators,
-		})
-		.or_else(|| validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers));
+		.map(|validators| ChangeToEnact { signal_block: None, validators })
+		.or_else(|| {
+			validators.finalize_validators_change(storage, &finalized_blocks.finalized_headers)
+		});
 
 	// NOTE: we can't return Err() from anywhere below this line
 	// (because otherwise we'll have inconsistent storage if transaction will fail)
@@ -145,9 +147,7 @@ pub fn import_header<S: Storage, PS: PruningStrategy, CT: ChainTime>(
 	let new_best_finalized_block_id = finalized_blocks.finalized_headers.last().map(|(id, _)| *id);
 	let pruning_upper_bound = pruning_strategy.pruning_upper_bound(
 		new_best_block_id.number,
-		new_best_finalized_block_id
-			.map(|id| id.number)
-			.unwrap_or(finalized_id.number),
+		new_best_finalized_block_id.map(|id| id.number).unwrap_or(finalized_id.number),
 	);
 
 	// now mark finalized headers && prune old headers
@@ -171,15 +171,16 @@ pub fn header_import_requires_receipts<S: Storage>(
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{
-		run_test, secret_to_address, test_aura_config, test_validators_config, validator, validators_addresses,
-		validators_change_receipt, HeaderBuilder, KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT,
+	use crate::{
+		mock::{
+			run_test, secret_to_address, test_aura_config, test_validators_config, validator,
+			validators_addresses, validators_change_receipt, HeaderBuilder,
+			KeepSomeHeadersBehindBest, TestRuntime, GAS_LIMIT,
+		},
+		validators::ValidatorsSource,
+		BlocksToPrune, BridgeStorage, Headers, PruningRange,
 	};
-	use crate::validators::ValidatorsSource;
-	use crate::DefaultInstance;
-	use crate::{BlocksToPrune, BridgeStorage, Headers, PruningRange};
-	use frame_support::{StorageMap, StorageValue};
-	use secp256k1::SecretKey;
+	use libsecp256k1::SecretKey;
 
 	const TOTAL_VALIDATORS: usize = 3;
 
@@ -188,10 +189,7 @@ mod tests {
 		run_test(TOTAL_VALIDATORS, |_| {
 			let mut storage = BridgeStorage::<TestRuntime>::new();
 			storage.finalize_and_prune_headers(
-				Some(HeaderId {
-					number: 100,
-					..Default::default()
-				}),
+				Some(HeaderId { number: 100, ..Default::default() }),
 				0,
 			);
 			assert_eq!(
@@ -283,8 +281,10 @@ mod tests {
 	#[test]
 	fn headers_are_pruned_during_import() {
 		run_test(TOTAL_VALIDATORS, |ctx| {
-			let validators_config =
-				ValidatorsConfiguration::Single(ValidatorsSource::Contract([3; 20].into(), ctx.addresses.clone()));
+			let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+				[3; 20].into(),
+				ctx.addresses.clone(),
+			));
 			let validators = vec![validator(0), validator(1), validator(2)];
 			let mut storage = BridgeStorage::<TestRuntime>::new();
 
@@ -307,7 +307,9 @@ mod tests {
 				)
 				.unwrap();
 				match i {
-					2..=10 => assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,),
+					2..=10 => {
+						assert_eq!(finalized_blocks, vec![(parent_id, Some(100))], "At {}", i,)
+					},
 					_ => assert_eq!(finalized_blocks, vec![], "At {}", i),
 				}
 				latest_block_id = rolling_last_block_id;
@@ -341,8 +343,8 @@ mod tests {
 			latest_block_id = rolling_last_block_id;
 
 			// and now let's say validators 1 && 2 went offline
-			// => in the range 12-25 no blocks are finalized, but we still continue to prune old headers
-			// until header#11 is met. we can't prune #11, because it schedules change
+			// => in the range 12-25 no blocks are finalized, but we still continue to prune old
+			// headers until header#11 is met. we can't prune #11, because it schedules change
 			let mut step = 56u64;
 			let mut expected_blocks = vec![(header11.compute_id(), Some(101))];
 			for i in 12..25 {
@@ -367,11 +369,8 @@ mod tests {
 				step += 3;
 			}
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 11,
-					oldest_block_to_keep: 14,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 11, oldest_block_to_keep: 14 },
 			);
 
 			// now let's insert block signed by validator 1
@@ -394,11 +393,8 @@ mod tests {
 			.unwrap();
 			assert_eq!(finalized_blocks, expected_blocks);
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 15,
-					oldest_block_to_keep: 15,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 15, oldest_block_to_keep: 15 },
 			);
 		});
 	}
@@ -485,9 +481,7 @@ mod tests {
 			let header1 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(0)
-					.step(2)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(0).step(2).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header1);
@@ -497,9 +491,7 @@ mod tests {
 			let header2 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(1)
-					.step(3)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(1).step(3).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header2);
@@ -509,9 +501,7 @@ mod tests {
 			let header3 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(2)
-					.step(4)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(2).step(4).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header3);
@@ -554,19 +544,19 @@ mod tests {
 			assert_eq!(storage.best_block().0, header5_1);
 			assert_eq!(storage.finalized_block(), header1);
 
-			// when we import header4 { parent = header3 }, authored by validator[0], header2 is finalized
+			// when we import header4 { parent = header3 }, authored by validator[0], header2 is
+			// finalized
 			let header4 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
-				HeaderBuilder::with_parent_number(3)
-					.step(5)
-					.sign_by_set(&ctx.validators),
+				HeaderBuilder::with_parent_number(3).step(5).sign_by_set(&ctx.validators),
 			)
 			.unwrap();
 			assert_eq!(storage.best_block().0, header5_1);
 			assert_eq!(storage.finalized_block(), header2);
 
-			// when we import header5 { parent = header4 }, authored by validator[1], header3 is finalized
+			// when we import header5 { parent = header4 }, authored by validator[1], header3 is
+			// finalized
 			let header5 = import_custom_block(
 				&mut storage,
 				&ctx.validators,
@@ -578,7 +568,8 @@ mod tests {
 			assert_eq!(storage.best_block().0, header5);
 			assert_eq!(storage.finalized_block(), header3);
 
-			// import of header2'' { parent = header1 } fails, because it has number < best_finalized
+			// import of header2'' { parent = header1 } fails, because it has number <
+			// best_finalized
 			assert_eq!(
 				import_custom_block(
 					&mut storage,
diff --git a/polkadot/bridges/modules/ethereum/src/lib.rs b/polkadot/bridges/modules/ethereum/src/lib.rs
index b25b2b8e635e23f0c60dfecd57f5e1aae04f8762..b978ef5b742b27e1b78b01bd7e1633bc0901bcd6 100644
--- a/polkadot/bridges/modules/ethereum/src/lib.rs
+++ b/polkadot/bridges/modules/ethereum/src/lib.rs
@@ -19,17 +19,14 @@
 #![allow(clippy::large_enum_variant)]
 
 use crate::finality::{CachedFinalityVotes, FinalityVotes};
-use bp_eth_poa::{Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256};
-use codec::{Decode, Encode};
-use frame_support::{decl_module, decl_storage, traits::Get};
-use sp_runtime::{
-	transaction_validity::{
-		InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionSource, TransactionValidity,
-		UnknownTransaction, ValidTransaction,
-	},
-	RuntimeDebug,
+use bp_eth_poa::{
+	Address, AuraHeader, HeaderId, RawTransaction, RawTransactionReceipt, Receipt, H256, U256,
 };
-use sp_std::{cmp::Ord, collections::btree_map::BTreeMap, prelude::*};
+use codec::{Decode, Encode};
+use frame_support::traits::Get;
+use scale_info::TypeInfo;
+use sp_runtime::RuntimeDebug;
+use sp_std::{boxed::Box, cmp::Ord, collections::btree_map::BTreeMap, prelude::*};
 
 pub use validators::{ValidatorsConfiguration, ValidatorsSource};
 
@@ -85,7 +82,7 @@ pub struct PoolConfiguration {
 }
 
 /// Block header as it is stored in the runtime storage.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct StoredHeader<Submitter> {
 	/// Submitter of this header. May be `None` if header has been submitted
 	/// using unsigned transaction.
@@ -106,19 +103,19 @@ pub struct StoredHeader<Submitter> {
 }
 
 /// Validators set as it is stored in the runtime storage.
-#[derive(Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 #[cfg_attr(test, derive(Clone))]
 pub struct ValidatorsSet {
 	/// Validators of this set.
 	pub validators: Vec<Address>,
-	/// Hash of the block where this set has been signalled. None if this is the first set.
+	/// Hash of the block where this set has been signaled. None if this is the first set.
 	pub signal_block: Option<HeaderId>,
 	/// Hash of the block where this set has been enacted.
 	pub enact_block: HeaderId,
 }
 
 /// Validators set change as it is stored in the runtime storage.
-#[derive(Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 #[cfg_attr(test, derive(Clone))]
 pub struct AuraScheduledChange {
 	/// Validators of this set.
@@ -162,15 +159,15 @@ pub struct ChangeToEnact {
 }
 
 /// Blocks range that we want to prune.
-#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq)]
+#[derive(Encode, Decode, Default, RuntimeDebug, Clone, PartialEq, TypeInfo)]
 struct PruningRange {
 	/// Number of the oldest unpruned block(s). This might be the block that we do not
 	/// want to prune now (then it is equal to `oldest_block_to_keep`), or block that we
 	/// were unable to prune for whatever reason (i.e. if it isn't finalized yet and has
 	/// scheduled validators set change).
 	pub oldest_unpruned_block: u64,
-	/// Number of oldest block(s) that we want to keep. We want to prune blocks in range
-	/// [`oldest_unpruned_block`; `oldest_block_to_keep`).
+	/// Number of the oldest block(s) that we want to keep. We want to prune blocks in range
+	/// [ `oldest_unpruned_block`; `oldest_block_to_keep` ).
 	pub oldest_block_to_keep: u64,
 }
 
@@ -224,14 +221,11 @@ impl<Submitter> ImportContext<Submitter> {
 		&self.validators_set
 	}
 
-	/// Returns reference to the latest block which has signalled change of validators set.
-	/// This may point to parent if parent has signalled change.
+	/// Returns reference to the latest block which has signaled change of validators set.
+	/// This may point to parent if parent has signaled change.
 	pub fn last_signal_block(&self) -> Option<HeaderId> {
 		match self.parent_scheduled_change {
-			Some(_) => Some(HeaderId {
-				number: self.parent_header.number,
-				hash: self.parent_hash,
-			}),
+			Some(_) => Some(HeaderId { number: self.parent_header.number, hash: self.parent_hash }),
 			None => self.last_signal_block,
 		}
 	}
@@ -316,11 +310,11 @@ pub trait PruningStrategy: Default {
 	/// Pallet may prune both finalized and unfinalized blocks. But it can't give any
 	/// guarantees on when it will happen. Example: if some unfinalized block at height N
 	/// has scheduled validators set change, then the module won't prune any blocks with
-	/// number >= N even if strategy allows that.
+	/// number greater than or equal to N even if strategy allows that.
 	///
 	/// If your strategy allows pruning unfinalized blocks, this could lead to switch
-	/// between finalized forks (only if authorities are misbehaving). But since 50%+1 (or 2/3)
-	/// authorities are able to do whatever they want with the chain, this isn't considered
+	/// between finalized forks (only if authorities are misbehaving). But since 50 percent plus one
+	/// (or 2/3) authorities are able to do whatever they want with the chain, this isn't considered
 	/// fatal. If your strategy only prunes finalized blocks, we'll never be able to finalize
 	/// header that isn't descendant of current best finalized block.
 	fn pruning_upper_bound(&mut self, best_number: u64, best_finalized_number: u64) -> u64;
@@ -349,10 +343,10 @@ impl ChainTime for () {
 pub trait OnHeadersSubmitted<AccountId> {
 	/// Called when valid headers have been submitted.
 	///
-	/// The submitter **must not** be rewarded for submitting valid headers, because greedy authority
-	/// could produce and submit multiple valid headers (without relaying them to other peers) and
-	/// get rewarded. Instead, the provider could track submitters and stop rewarding if too many
-	/// headers have been submitted without finalization.
+	/// The submitter **must not** be rewarded for submitting valid headers, because greedy
+	/// authority could produce and submit multiple valid headers (without relaying them to other
+	/// peers) and get rewarded. Instead, the provider could track submitters and stop rewarding if
+	/// too many headers have been submitted without finalization.
 	fn on_valid_headers_submitted(submitter: AccountId, useful: u64, useless: u64);
 	/// Called when invalid headers have been submitted.
 	fn on_invalid_headers_submitted(submitter: AccountId);
@@ -369,34 +363,53 @@ impl<AccountId> OnHeadersSubmitted<AccountId> for () {
 	fn on_valid_headers_finalized(_submitter: AccountId, _finalized: u64) {}
 }
 
-/// The module configuration trait.
-pub trait Config<I = DefaultInstance>: frame_system::Config {
-	/// Aura configuration.
-	type AuraConfiguration: Get<AuraConfiguration>;
-	/// Validators configuration.
-	type ValidatorsConfiguration: Get<validators::ValidatorsConfiguration>;
+pub use pallet::*;
 
-	/// Interval (in blocks) for for finality votes caching.
-	/// If None, cache is disabled.
-	///
-	/// Ideally, this should either be None (when we are sure that there won't
-	/// be any significant finalization delays), or something that is bit larger
-	/// than average finalization delay.
-	type FinalityVotesCachingInterval: Get<Option<u64>>;
-	/// Headers pruning strategy.
-	type PruningStrategy: PruningStrategy;
-	/// Header timestamp verification against current on-chain time.
-	type ChainTime: ChainTime;
-
-	/// Handler for headers submission result.
-	type OnHeadersSubmitted: OnHeadersSubmitted<Self::AccountId>;
-}
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// Aura configuration.
+		type AuraConfiguration: Get<AuraConfiguration>;
+		/// Validators configuration.
+		type ValidatorsConfiguration: Get<validators::ValidatorsConfiguration>;
+
+		/// Interval (in blocks) for for finality votes caching.
+		/// If None, cache is disabled.
+		///
+		/// Ideally, this should either be None (when we are sure that there won't
+		/// be any significant finalization delays), or something that is bit larger
+		/// than average finalization delay.
+		type FinalityVotesCachingInterval: Get<Option<u64>>;
+		/// Headers pruning strategy.
+		type PruningStrategy: PruningStrategy;
+		/// Header timestamp verification against current on-chain time.
+		type ChainTime: ChainTime;
+
+		/// Handler for headers submission result.
+		type OnHeadersSubmitted: OnHeadersSubmitted<Self::AccountId>;
+	}
 
-decl_module! {
-	pub struct Module<T: Config<I>, I: Instance = DefaultInstance> for enum Call where origin: T::Origin {
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {}
+
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Import single Aura header. Requires transaction to be **UNSIGNED**.
-		#[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
-		pub fn import_unsigned_header(origin, header: AuraHeader, receipts: Option<Vec<Receipt>>) {
+		#[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
+		pub fn import_unsigned_header(
+			origin: OriginFor<T>,
+			header: Box<AuraHeader>,
+			receipts: Option<Vec<Receipt>>,
+		) -> DispatchResult {
 			frame_system::ensure_none(origin)?;
 
 			import::import_header(
@@ -405,10 +418,13 @@ decl_module! {
 				&T::AuraConfiguration::get(),
 				&T::ValidatorsConfiguration::get(),
 				None,
-				header,
+				*header,
 				&T::ChainTime::default(),
 				receipts,
-			).map_err(|e| e.msg())?;
+			)
+			.map_err(|e| e.msg())?;
+
+			Ok(())
 		}
 
 		/// Import Aura chain headers in a single **SIGNED** transaction.
@@ -417,8 +433,11 @@ decl_module! {
 		///
 		/// This should be used with caution - passing too many headers could lead to
 		/// enormous block production/import time.
-		#[weight = 0] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
-		pub fn import_signed_headers(origin, headers_with_receipts: Vec<(AuraHeader, Option<Vec<Receipt>>)>) {
+		#[pallet::weight(0)] // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78)
+		pub fn import_signed_headers(
+			origin: OriginFor<T>,
+			headers_with_receipts: Vec<(AuraHeader, Option<Vec<Receipt>>)>,
+		) -> DispatchResult {
 			let submitter = frame_system::ensure_signed(origin)?;
 			let mut finalized_headers = BTreeMap::new();
 			let import_result = import::import_headers(
@@ -435,10 +454,7 @@ decl_module! {
 			// if we have finalized some headers, we will reward their submitters even
 			// if current submitter has provided some invalid headers
 			for (f_submitter, f_count) in finalized_headers {
-				T::OnHeadersSubmitted::on_valid_headers_finalized(
-					f_submitter,
-					f_count,
-				);
+				T::OnHeadersSubmitted::on_valid_headers_finalized(f_submitter, f_count);
 			}
 
 			// now track/penalize current submitter for providing new headers
@@ -449,63 +465,138 @@ decl_module! {
 					// even though we may have accept some headers, we do not want to reward someone
 					// who provides invalid headers
 					T::OnHeadersSubmitted::on_invalid_headers_submitted(submitter);
-					return Err(error.msg().into());
+					return Err(error.msg().into())
 				},
 			}
+
+			Ok(())
 		}
 	}
-}
 
-decl_storage! {
-	trait Store for Pallet<T: Config<I>, I: Instance = DefaultInstance> as Bridge {
-		/// Best known block.
-		BestBlock: (HeaderId, U256);
-		/// Best finalized block.
-		FinalizedBlock: HeaderId;
-		/// Range of blocks that we want to prune.
-		BlocksToPrune: PruningRange;
-		/// Map of imported headers by hash.
-		Headers: map hasher(identity) H256 => Option<StoredHeader<T::AccountId>>;
-		/// Map of imported header hashes by number.
-		HeadersByNumber: map hasher(blake2_128_concat) u64 => Option<Vec<H256>>;
-		/// Map of cached finality data by header hash.
-		FinalityCache: map hasher(identity) H256 => Option<FinalityVotes<T::AccountId>>;
-		/// The ID of next validator set.
-		NextValidatorsSetId: u64;
-		/// Map of validators sets by their id.
-		ValidatorsSets: map hasher(twox_64_concat) u64 => Option<ValidatorsSet>;
-		/// Validators sets reference count. Each header that is authored by this set increases
-		/// the reference count. When we prune this header, we decrease the reference count.
-		/// When it reaches zero, we are free to prune validator set as well.
-		ValidatorsSetsRc: map hasher(twox_64_concat) u64 => Option<u64>;
-		/// Map of validators set changes scheduled by given header.
-		ScheduledChanges: map hasher(identity) H256 => Option<AuraScheduledChange>;
+	#[pallet::validate_unsigned]
+	impl<T: Config<I>, I: 'static> ValidateUnsigned for Pallet<T, I> {
+		type Call = Call<T, I>;
+
+		fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
+			match *call {
+				Self::Call::import_unsigned_header { ref header, ref receipts } => {
+					let accept_result = verification::accept_aura_header_into_pool(
+						&BridgeStorage::<T, I>::new(),
+						&T::AuraConfiguration::get(),
+						&T::ValidatorsConfiguration::get(),
+						&pool_configuration(),
+						header,
+						&T::ChainTime::default(),
+						receipts.as_ref(),
+					);
+
+					match accept_result {
+						Ok((requires, provides)) => Ok(ValidTransaction {
+							priority: TransactionPriority::max_value(),
+							requires,
+							provides,
+							longevity: TransactionLongevity::max_value(),
+							propagate: true,
+						}),
+						// UnsignedTooFarInTheFuture is the special error code used to limit
+						// number of transactions in the pool - we do not want to ban transaction
+						// in this case (see verification.rs for details)
+						Err(error::Error::UnsignedTooFarInTheFuture) => UnknownTransaction::Custom(
+							error::Error::UnsignedTooFarInTheFuture.code(),
+						)
+						.into(),
+						Err(error) => InvalidTransaction::Custom(error.code()).into(),
+					}
+				},
+				_ => InvalidTransaction::Call.into(),
+			}
+		}
+	}
+
+	/// Best known block.
+	#[pallet::storage]
+	pub(super) type BestBlock<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, (HeaderId, U256), ValueQuery>;
+
+	/// Best finalized block.
+	#[pallet::storage]
+	pub(super) type FinalizedBlock<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, HeaderId, ValueQuery>;
+
+	/// Range of blocks that we want to prune.
+	#[pallet::storage]
+	pub(super) type BlocksToPrune<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, PruningRange, ValueQuery>;
+
+	/// Map of imported headers by hash.
+	#[pallet::storage]
+	pub(super) type Headers<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, H256, StoredHeader<T::AccountId>>;
+
+	/// Map of imported header hashes by number.
+	#[pallet::storage]
+	pub(super) type HeadersByNumber<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Blake2_128Concat, u64, Vec<H256>>;
+
+	/// Map of cached finality data by header hash.
+	#[pallet::storage]
+	pub(super) type FinalityCache<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, H256, FinalityVotes<T::AccountId>>;
+
+	/// The ID of next validator set.
+	#[pallet::storage]
+	pub(super) type NextValidatorsSetId<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, u64, ValueQuery>;
+
+	/// Map of validators sets by their id.
+	#[pallet::storage]
+	pub(super) type ValidatorsSets<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Twox64Concat, u64, ValidatorsSet>;
+
+	/// Validators sets reference count. Each header that is authored by this set increases
+	/// the reference count. When we prune this header, we decrease the reference count.
+	/// When it reaches zero, we are free to prune validator set as well.
+	#[pallet::storage]
+	pub(super) type ValidatorsSetsRc<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Twox64Concat, u64, u64>;
+
+	/// Map of validators set changes scheduled by given header.
+	#[pallet::storage]
+	pub(super) type ScheduledChanges<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, H256, AuraScheduledChange>;
+
+	#[pallet::genesis_config]
+	#[cfg_attr(feature = "std", derive(Default))]
+	pub struct GenesisConfig {
+		/// PoA header to start with.
+		pub initial_header: AuraHeader,
+		/// Initial PoA chain difficulty.
+		pub initial_difficulty: U256,
+		/// Initial PoA validators set.
+		pub initial_validators: Vec<Address>,
 	}
-	add_extra_genesis {
-		config(initial_header): AuraHeader;
-		config(initial_difficulty): U256;
-		config(initial_validators): Vec<Address>;
-		build(|config| {
+
+	#[pallet::genesis_build]
+	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig {
+		fn build(&self) {
 			// the initial blocks should be selected so that:
 			// 1) it doesn't signal validators changes;
 			// 2) there are no scheduled validators changes from previous blocks;
-			// 3) (implied) all direct children of initial block are authored by the same validators set.
+			// 3) (implied) all direct children of initial block are authored by the same validators
+			// set.
 
-			assert!(
-				!config.initial_validators.is_empty(),
-				"Initial validators set can't be empty",
-			);
+			assert!(!self.initial_validators.is_empty(), "Initial validators set can't be empty",);
 
 			initialize_storage::<T, I>(
-				&config.initial_header,
-				config.initial_difficulty,
-				&config.initial_validators,
+				&self.initial_header,
+				self.initial_difficulty,
+				&self.initial_validators,
 			);
-		})
+		}
 	}
 }
 
-impl<T: Config<I>, I: Instance> Pallet<T, I> {
+impl<T: Config<I>, I: 'static> Pallet<T, I> {
 	/// Returns number and hash of the best block known to the bridge module.
 	/// The caller should only submit `import_header` transaction that makes
 	/// (or leads to making) other header the best one.
@@ -542,49 +633,11 @@ impl<T: Config<I>, I: Instance> Pallet<T, I> {
 	}
 }
 
-impl<T: Config<I>, I: Instance> frame_support::unsigned::ValidateUnsigned for Pallet<T, I> {
-	type Call = Call<T, I>;
-
-	fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity {
-		match *call {
-			Self::Call::import_unsigned_header(ref header, ref receipts) => {
-				let accept_result = verification::accept_aura_header_into_pool(
-					&BridgeStorage::<T, I>::new(),
-					&T::AuraConfiguration::get(),
-					&T::ValidatorsConfiguration::get(),
-					&pool_configuration(),
-					header,
-					&T::ChainTime::default(),
-					receipts.as_ref(),
-				);
-
-				match accept_result {
-					Ok((requires, provides)) => Ok(ValidTransaction {
-						priority: TransactionPriority::max_value(),
-						requires,
-						provides,
-						longevity: TransactionLongevity::max_value(),
-						propagate: true,
-					}),
-					// UnsignedTooFarInTheFuture is the special error code used to limit
-					// number of transactions in the pool - we do not want to ban transaction
-					// in this case (see verification.rs for details)
-					Err(error::Error::UnsignedTooFarInTheFuture) => {
-						UnknownTransaction::Custom(error::Error::UnsignedTooFarInTheFuture.code()).into()
-					}
-					Err(error) => InvalidTransaction::Custom(error.code()).into(),
-				}
-			}
-			_ => InvalidTransaction::Call.into(),
-		}
-	}
-}
-
 /// Runtime bridge storage.
 #[derive(Default)]
-pub struct BridgeStorage<T, I = DefaultInstance>(sp_std::marker::PhantomData<(T, I)>);
+pub struct BridgeStorage<T, I: 'static = ()>(sp_std::marker::PhantomData<(T, I)>);
 
-impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
+impl<T: Config<I>, I: 'static> BridgeStorage<T, I> {
 	/// Create new BridgeStorage.
 	pub fn new() -> Self {
 		BridgeStorage(sp_std::marker::PhantomData::<(T, I)>::default())
@@ -592,7 +645,7 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 
 	/// Prune old blocks.
 	fn prune_blocks(&self, mut max_blocks_to_prune: u64, finalized_number: u64, prune_end: u64) {
-		let pruning_range = BlocksToPrune::<I>::get();
+		let pruning_range = BlocksToPrune::<T, I>::get();
 		let mut new_pruning_range = pruning_range.clone();
 
 		// update oldest block we want to keep
@@ -607,11 +660,11 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 		for number in begin..end {
 			// if we can't prune anything => break
 			if max_blocks_to_prune == 0 {
-				break;
+				break
 			}
 
 			// read hashes of blocks with given number and try to prune these blocks
-			let blocks_at_number = HeadersByNumber::<I>::take(number);
+			let blocks_at_number = HeadersByNumber::<T, I>::take(number);
 			if let Some(mut blocks_at_number) = blocks_at_number {
 				self.prune_blocks_by_hashes(
 					&mut max_blocks_to_prune,
@@ -622,8 +675,8 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 
 				// if we haven't pruned all blocks, remember unpruned
 				if !blocks_at_number.is_empty() {
-					HeadersByNumber::<I>::insert(number, blocks_at_number);
-					break;
+					HeadersByNumber::<T, I>::insert(number, blocks_at_number);
+					break
 				}
 			}
 
@@ -638,7 +691,7 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 
 		// update pruning range in storage
 		if pruning_range != new_pruning_range {
-			BlocksToPrune::<I>::put(new_pruning_range);
+			BlocksToPrune::<T, I>::put(new_pruning_range);
 		}
 	}
 
@@ -651,8 +704,10 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 		blocks_at_number: &mut Vec<H256>,
 	) {
 		// ensure that unfinalized headers we want to prune do not have scheduled changes
-		if number > finalized_number && blocks_at_number.iter().any(ScheduledChanges::<I>::contains_key) {
-			return;
+		if number > finalized_number &&
+			blocks_at_number.iter().any(ScheduledChanges::<T, I>::contains_key)
+		{
+			return
 		}
 
 		// physically remove headers and (probably) obsolete validators sets
@@ -665,10 +720,10 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 				hash,
 			);
 
-			ScheduledChanges::<I>::remove(hash);
+			ScheduledChanges::<T, I>::remove(hash);
 			FinalityCache::<T, I>::remove(hash);
 			if let Some(header) = header {
-				ValidatorsSetsRc::<I>::mutate(header.next_validators_set_id, |rc| match *rc {
+				ValidatorsSetsRc::<T, I>::mutate(header.next_validators_set_id, |rc| match *rc {
 					Some(rc) if rc > 1 => Some(rc - 1),
 					_ => None,
 				});
@@ -677,21 +732,21 @@ impl<T: Config<I>, I: Instance> BridgeStorage<T, I> {
 			// check if we have already pruned too much headers in this call
 			*max_blocks_to_prune -= 1;
 			if *max_blocks_to_prune == 0 {
-				return;
+				return
 			}
 		}
 	}
 }
 
-impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
+impl<T: Config<I>, I: 'static> Storage for BridgeStorage<T, I> {
 	type Submitter = T::AccountId;
 
 	fn best_block(&self) -> (HeaderId, U256) {
-		BestBlock::<I>::get()
+		BestBlock::<T, I>::get()
 	}
 
 	fn finalized_block(&self) -> HeaderId {
-		FinalizedBlock::<I>::get()
+		FinalizedBlock::<T, I>::get()
 	}
 
 	fn header(&self, hash: &H256) -> Option<(AuraHeader, Option<Self::Submitter>)> {
@@ -708,21 +763,22 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 		let mut current_id = *parent;
 		loop {
 			// if we have reached finalized block's sibling => stop with special signal
-			if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash {
+			if current_id.number == best_finalized.number && current_id.hash != best_finalized.hash
+			{
 				votes.stopped_at_finalized_sibling = true;
-				return votes;
+				return votes
 			}
 
 			// if we have reached target header => stop
 			if stop_at(&current_id.hash) {
-				return votes;
+				return votes
 			}
 
 			// if we have found cached votes => stop
 			let cached_votes = FinalityCache::<T, I>::get(&current_id.hash);
 			if let Some(cached_votes) = cached_votes {
 				votes.votes = Some(cached_votes);
-				return votes;
+				return votes
 			}
 
 			// read next parent header id
@@ -750,9 +806,11 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 		parent_hash: &H256,
 	) -> Option<ImportContext<Self::Submitter>> {
 		Headers::<T, I>::get(parent_hash).map(|parent_header| {
-			let validators_set = ValidatorsSets::<I>::get(parent_header.next_validators_set_id)
-				.expect("validators set is only pruned when last ref is pruned; there is a ref; qed");
-			let parent_scheduled_change = ScheduledChanges::<I>::get(parent_hash);
+			let validators_set = ValidatorsSets::<T, I>::get(parent_header.next_validators_set_id)
+				.expect(
+					"validators set is only pruned when last ref is pruned; there is a ref; qed",
+				);
+			let parent_scheduled_change = ScheduledChanges::<T, I>::get(parent_hash);
 			ImportContext {
 				submitter,
 				parent_hash: *parent_hash,
@@ -767,15 +825,15 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 	}
 
 	fn scheduled_change(&self, hash: &H256) -> Option<AuraScheduledChange> {
-		ScheduledChanges::<I>::get(hash)
+		ScheduledChanges::<T, I>::get(hash)
 	}
 
 	fn insert_header(&mut self, header: HeaderToImport<Self::Submitter>) {
 		if header.is_best {
-			BestBlock::<I>::put((header.id, header.total_difficulty));
+			BestBlock::<T, I>::put((header.id, header.total_difficulty));
 		}
 		if let Some(scheduled_change) = header.scheduled_change {
-			ScheduledChanges::<I>::insert(
+			ScheduledChanges::<T, I>::insert(
 				&header.id.hash,
 				AuraScheduledChange {
 					validators: scheduled_change,
@@ -785,12 +843,12 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 		}
 		let next_validators_set_id = match header.enacted_change {
 			Some(enacted_change) => {
-				let next_validators_set_id = NextValidatorsSetId::<I>::mutate(|set_id| {
+				let next_validators_set_id = NextValidatorsSetId::<T, I>::mutate(|set_id| {
 					let next_set_id = *set_id;
 					*set_id += 1;
 					next_set_id
 				});
-				ValidatorsSets::<I>::insert(
+				ValidatorsSets::<T, I>::insert(
 					next_validators_set_id,
 					ValidatorsSet {
 						validators: enacted_change.validators,
@@ -798,21 +856,22 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 						signal_block: enacted_change.signal_block,
 					},
 				);
-				ValidatorsSetsRc::<I>::insert(next_validators_set_id, 1);
+				ValidatorsSetsRc::<T, I>::insert(next_validators_set_id, 1);
 				next_validators_set_id
-			}
+			},
 			None => {
-				ValidatorsSetsRc::<I>::mutate(header.context.validators_set_id, |rc| {
+				ValidatorsSetsRc::<T, I>::mutate(header.context.validators_set_id, |rc| {
 					*rc = Some(rc.map(|rc| rc + 1).unwrap_or(1));
 					*rc
 				});
 				header.context.validators_set_id
-			}
+			},
 		};
 
 		let finality_votes_caching_interval = T::FinalityVotesCachingInterval::get();
 		if let Some(finality_votes_caching_interval) = finality_votes_caching_interval {
-			let cache_entry_required = header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0;
+			let cache_entry_required =
+				header.id.number != 0 && header.id.number % finality_votes_caching_interval == 0;
 			if cache_entry_required {
 				FinalityCache::<T, I>::insert(header.id.hash, header.finality_votes);
 			}
@@ -826,7 +885,7 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 		);
 
 		let last_signal_block = header.context.last_signal_block();
-		HeadersByNumber::<I>::append(header.id.number, header.id.hash);
+		HeadersByNumber::<T, I>::append(header.id.number, header.id.hash);
 		Headers::<T, I>::insert(
 			&header.id.hash,
 			StoredHeader {
@@ -844,7 +903,7 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 		let finalized_number = finalized
 			.as_ref()
 			.map(|f| f.number)
-			.unwrap_or_else(|| FinalizedBlock::<I>::get().number);
+			.unwrap_or_else(|| FinalizedBlock::<T, I>::get().number);
 		if let Some(finalized) = finalized {
 			log::trace!(
 				target: "runtime",
@@ -853,7 +912,7 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 				finalized.hash,
 			);
 
-			FinalizedBlock::<I>::put(finalized);
+			FinalizedBlock::<T, I>::put(finalized);
 		}
 
 		// and now prune headers if we need to
@@ -863,7 +922,7 @@ impl<T: Config<I>, I: Instance> Storage for BridgeStorage<T, I> {
 
 /// Initialize storage.
 #[cfg(any(feature = "std", feature = "runtime-benchmarks"))]
-pub(crate) fn initialize_storage<T: Config<I>, I: Instance>(
+pub(crate) fn initialize_storage<T: Config<I>, I: 'static>(
 	initial_header: &AuraHeader,
 	initial_difficulty: U256,
 	initial_validators: &[Address],
@@ -876,17 +935,14 @@ pub(crate) fn initialize_storage<T: Config<I>, I: Instance>(
 		initial_hash,
 	);
 
-	let initial_id = HeaderId {
-		number: initial_header.number,
-		hash: initial_hash,
-	};
-	BestBlock::<I>::put((initial_id, initial_difficulty));
-	FinalizedBlock::<I>::put(initial_id);
-	BlocksToPrune::<I>::put(PruningRange {
+	let initial_id = HeaderId { number: initial_header.number, hash: initial_hash };
+	BestBlock::<T, I>::put((initial_id, initial_difficulty));
+	FinalizedBlock::<T, I>::put(initial_id);
+	BlocksToPrune::<T, I>::put(PruningRange {
 		oldest_unpruned_block: initial_header.number,
 		oldest_block_to_keep: initial_header.number,
 	});
-	HeadersByNumber::<I>::insert(initial_header.number, vec![initial_hash]);
+	HeadersByNumber::<T, I>::insert(initial_header.number, vec![initial_hash]);
 	Headers::<T, I>::insert(
 		initial_hash,
 		StoredHeader {
@@ -897,8 +953,8 @@ pub(crate) fn initialize_storage<T: Config<I>, I: Instance>(
 			last_signal_block: None,
 		},
 	);
-	NextValidatorsSetId::<I>::put(1);
-	ValidatorsSets::<I>::insert(
+	NextValidatorsSetId::<T, I>::put(1);
+	ValidatorsSets::<T, I>::insert(
 		0,
 		ValidatorsSet {
 			validators: initial_validators.to_vec(),
@@ -906,7 +962,7 @@ pub(crate) fn initialize_storage<T: Config<I>, I: Instance>(
 			enact_block: initial_id,
 		},
 	);
-	ValidatorsSetsRc::<I>::insert(0, 1);
+	ValidatorsSetsRc::<T, I>::insert(0, 1);
 }
 
 /// Verify that transaction is included into given finalized block.
@@ -924,7 +980,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			proof.len(),
 		);
 
-		return false;
+		return false
 	}
 
 	let header = match storage.header(&block) {
@@ -936,8 +992,8 @@ pub fn verify_transaction_finalized<S: Storage>(
 				block,
 			);
 
-			return false;
-		}
+			return false
+		},
 	};
 	let finalized = storage.finalized_block();
 
@@ -951,7 +1007,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			finalized.number,
 		);
 
-		return false;
+		return false
 	}
 
 	// check if header is actually finalized
@@ -969,7 +1025,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			finalized.hash,
 		);
 
-		return false;
+		return false
 	}
 
 	// verify that transaction is included in the block
@@ -981,7 +1037,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			computed_root,
 		);
 
-		return false;
+		return false
 	}
 
 	// verify that transaction receipt is included in the block
@@ -993,7 +1049,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			computed_root,
 		);
 
-		return false;
+		return false
 	}
 
 	// check that transaction has completed successfully
@@ -1007,7 +1063,7 @@ pub fn verify_transaction_finalized<S: Storage>(
 			);
 
 			false
-		}
+		},
 		Err(err) => {
 			log::trace!(
 				target: "runtime",
@@ -1016,23 +1072,24 @@ pub fn verify_transaction_finalized<S: Storage>(
 			);
 
 			false
-		}
+		},
 	}
 }
 
 /// Transaction pool configuration.
 fn pool_configuration() -> PoolConfiguration {
-	PoolConfiguration {
-		max_future_number_difference: 10,
-	}
+	PoolConfiguration { max_future_number_difference: 10 }
 }
 
 /// Return iterator of given header ancestors.
-fn ancestry<S: Storage>(storage: &'_ S, mut parent_hash: H256) -> impl Iterator<Item = (H256, AuraHeader)> + '_ {
+fn ancestry<S: Storage>(
+	storage: &'_ S,
+	mut parent_hash: H256,
+) -> impl Iterator<Item = (H256, AuraHeader)> + '_ {
 	sp_std::iter::from_fn(move || {
 		let (header, _) = storage.header(&parent_hash)?;
 		if header.number == 0 {
-			return None;
+			return None
 		}
 
 		let hash = parent_hash;
@@ -1044,12 +1101,14 @@ fn ancestry<S: Storage>(storage: &'_ S, mut parent_hash: H256) -> impl Iterator<
 #[cfg(test)]
 pub(crate) mod tests {
 	use super::*;
-	use crate::finality::FinalityAncestor;
-	use crate::mock::{
-		genesis, insert_header, run_test, run_test_with_genesis, validators_addresses, HeaderBuilder, TestRuntime,
-		GAS_LIMIT,
+	use crate::{
+		finality::FinalityAncestor,
+		mock::{
+			genesis, insert_header, run_test, run_test_with_genesis, validators_addresses,
+			HeaderBuilder, TestRuntime, GAS_LIMIT,
+		},
+		test_utils::validator_utils::*,
 	};
-	use crate::test_utils::validator_utils::*;
 	use bp_eth_poa::compute_merkle_root;
 
 	const TOTAL_VALIDATORS: usize = 3;
@@ -1112,7 +1171,7 @@ pub(crate) mod tests {
 					);
 
 					if i == 7 && j == 1 {
-						ScheduledChanges::<DefaultInstance>::insert(
+						ScheduledChanges::<TestRuntime, ()>::insert(
 							hash,
 							AuraScheduledChange {
 								validators: validators_addresses(5),
@@ -1121,7 +1180,7 @@ pub(crate) mod tests {
 						);
 					}
 				}
-				HeadersByNumber::<DefaultInstance>::insert(i, headers_by_number);
+				HeadersByNumber::<TestRuntime, ()>::insert(i, headers_by_number);
 			}
 
 			f(BridgeStorage::new())
@@ -1131,20 +1190,17 @@ pub(crate) mod tests {
 	#[test]
 	fn blocks_are_not_pruned_if_range_is_empty() {
 		with_headers_to_prune(|storage| {
-			BlocksToPrune::<DefaultInstance>::put(PruningRange {
+			BlocksToPrune::<TestRuntime, ()>::put(PruningRange {
 				oldest_unpruned_block: 5,
 				oldest_block_to_keep: 5,
 			});
 
 			// try to prune blocks [5; 10)
 			storage.prune_blocks(0xFFFF, 10, 5);
-			assert_eq!(HeadersByNumber::<DefaultInstance>::get(&5).unwrap().len(), 5);
+			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&5).unwrap().len(), 5);
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 5,
-					oldest_block_to_keep: 5,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 },
 			);
 		});
 	}
@@ -1152,7 +1208,7 @@ pub(crate) mod tests {
 	#[test]
 	fn blocks_to_prune_never_shrinks_from_the_end() {
 		with_headers_to_prune(|storage| {
-			BlocksToPrune::<DefaultInstance>::put(PruningRange {
+			BlocksToPrune::<TestRuntime, ()>::put(PruningRange {
 				oldest_unpruned_block: 0,
 				oldest_block_to_keep: 5,
 			});
@@ -1160,11 +1216,8 @@ pub(crate) mod tests {
 			// try to prune blocks [5; 10)
 			storage.prune_blocks(0xFFFF, 10, 3);
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 5,
-					oldest_block_to_keep: 5,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 5, oldest_block_to_keep: 5 },
 			);
 		});
 	}
@@ -1174,16 +1227,13 @@ pub(crate) mod tests {
 		with_headers_to_prune(|storage| {
 			// try to prune blocks [0; 10)
 			storage.prune_blocks(0, 10, 10);
-			assert!(HeadersByNumber::<DefaultInstance>::get(&0).is_some());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&1).is_some());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&2).is_some());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&3).is_some());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&0).is_some());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&1).is_some());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&2).is_some());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&3).is_some());
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 0,
-					oldest_block_to_keep: 10,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 0, oldest_block_to_keep: 10 },
 			);
 		});
 	}
@@ -1194,33 +1244,27 @@ pub(crate) mod tests {
 			// try to prune blocks [0; 10)
 			storage.prune_blocks(7, 10, 10);
 			// 1 headers with number = 0 is pruned (1 total)
-			assert!(HeadersByNumber::<DefaultInstance>::get(&0).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&0).is_none());
 			// 5 headers with number = 1 are pruned (6 total)
-			assert!(HeadersByNumber::<DefaultInstance>::get(&1).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&1).is_none());
 			// 1 header with number = 2 are pruned (7 total)
-			assert_eq!(HeadersByNumber::<DefaultInstance>::get(&2).unwrap().len(), 4);
+			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&2).unwrap().len(), 4);
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 2,
-					oldest_block_to_keep: 10,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 2, oldest_block_to_keep: 10 },
 			);
 
 			// try to prune blocks [2; 10)
 			storage.prune_blocks(11, 10, 10);
 			// 4 headers with number = 2 are pruned (4 total)
-			assert!(HeadersByNumber::<DefaultInstance>::get(&2).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&2).is_none());
 			// 5 headers with number = 3 are pruned (9 total)
-			assert!(HeadersByNumber::<DefaultInstance>::get(&3).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&3).is_none());
 			// 2 headers with number = 4 are pruned (11 total)
-			assert_eq!(HeadersByNumber::<DefaultInstance>::get(&4).unwrap().len(), 3);
+			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&4).unwrap().len(), 3);
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 4,
-					oldest_block_to_keep: 10,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 4, oldest_block_to_keep: 10 },
 			);
 		});
 	}
@@ -1233,20 +1277,17 @@ pub(crate) mod tests {
 			// and one of blocks#7 has scheduled change
 			// => we won't prune any block#7 at all
 			storage.prune_blocks(0xFFFF, 5, 10);
-			assert!(HeadersByNumber::<DefaultInstance>::get(&0).is_none());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&1).is_none());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&2).is_none());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&3).is_none());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&4).is_none());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&5).is_none());
-			assert!(HeadersByNumber::<DefaultInstance>::get(&6).is_none());
-			assert_eq!(HeadersByNumber::<DefaultInstance>::get(&7).unwrap().len(), 5);
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&0).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&1).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&2).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&3).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&4).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&5).is_none());
+			assert!(HeadersByNumber::<TestRuntime, ()>::get(&6).is_none());
+			assert_eq!(HeadersByNumber::<TestRuntime, ()>::get(&7).unwrap().len(), 5);
 			assert_eq!(
-				BlocksToPrune::<DefaultInstance>::get(),
-				PruningRange {
-					oldest_unpruned_block: 7,
-					oldest_block_to_keep: 10,
-				},
+				BlocksToPrune::<TestRuntime, ()>::get(),
+				PruningRange { oldest_unpruned_block: 7, oldest_block_to_keep: 10 },
 			);
 		});
 	}
@@ -1266,13 +1307,14 @@ pub(crate) mod tests {
 			}
 
 			// for header with number = interval, cache entry is created
-			let header_with_entry = HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators);
+			let header_with_entry =
+				HeaderBuilder::with_parent_number(interval - 1).sign_by_set(&ctx.validators);
 			let header_with_entry_hash = header_with_entry.compute_hash();
 			insert_header(&mut storage, header_with_entry);
 			assert!(FinalityCache::<TestRuntime>::get(&header_with_entry_hash).is_some());
 
 			// when we later prune this header, cache entry is removed
-			BlocksToPrune::<DefaultInstance>::put(PruningRange {
+			BlocksToPrune::<TestRuntime, ()>::put(PruningRange {
 				oldest_unpruned_block: interval - 1,
 				oldest_block_to_keep: interval - 1,
 			});
@@ -1313,10 +1355,7 @@ pub(crate) mod tests {
 			let votes_at_3 = FinalityVotes {
 				votes: vec![([42; 20].into(), 21)].into_iter().collect(),
 				ancestry: vec![FinalityAncestor {
-					id: HeaderId {
-						number: 100,
-						hash: Default::default(),
-					},
+					id: HeaderId { number: 100, hash: Default::default() },
 					..Default::default()
 				}]
 				.into_iter()
@@ -1359,7 +1398,7 @@ pub(crate) mod tests {
 			insert_header(&mut storage, header1s);
 
 			// header1 is finalized
-			FinalizedBlock::<DefaultInstance>::put(header1_id);
+			FinalizedBlock::<TestRuntime, ()>::put(header1_id);
 
 			// trying to get finality votes when importing header2 -> header1 succeeds
 			assert!(
diff --git a/polkadot/bridges/modules/ethereum/src/mock.rs b/polkadot/bridges/modules/ethereum/src/mock.rs
index c8102cdb0b05d82f905b078d4262990411fa58e0..69665876520e7908df73ec87c1d8e7f05804ccb5 100644
--- a/polkadot/bridges/modules/ethereum/src/mock.rs
+++ b/polkadot/bridges/modules/ethereum/src/mock.rs
@@ -17,14 +17,18 @@
 // From construct_runtime macro
 #![allow(clippy::from_over_into)]
 
-pub use crate::test_utils::{insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT};
+pub use crate::test_utils::{
+	insert_header, validator_utils::*, validators_change_receipt, HeaderBuilder, GAS_LIMIT,
+};
 pub use bp_eth_poa::signatures::secret_to_address;
 
-use crate::validators::{ValidatorsConfiguration, ValidatorsSource};
-use crate::{AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy};
+use crate::{
+	validators::{ValidatorsConfiguration, ValidatorsSource},
+	AuraConfiguration, ChainTime, Config, GenesisConfig as CrateGenesisConfig, PruningStrategy,
+};
 use bp_eth_poa::{Address, AuraHeader, H256, U256};
-use frame_support::{parameter_types, weights::Weight};
-use secp256k1::SecretKey;
+use frame_support::{parameter_types, traits::GenesisBuild, weights::Weight};
+use libsecp256k1::SecretKey;
 use sp_runtime::{
 	testing::Header as SubstrateHeader,
 	traits::{BlakeTwo256, IdentityLookup},
@@ -146,23 +150,15 @@ pub fn run_test_with_genesis<T>(
 ) -> T {
 	let validators = validators(total_validators);
 	let addresses = validators_addresses(total_validators);
-	sp_io::TestExternalities::new(
-		CrateGenesisConfig {
+	sp_io::TestExternalities::from(
+		GenesisBuild::<TestRuntime>::build_storage(&CrateGenesisConfig {
 			initial_header: genesis.clone(),
 			initial_difficulty: 0.into(),
 			initial_validators: addresses.clone(),
-		}
-		.build_storage::<TestRuntime, crate::DefaultInstance>()
+		})
 		.unwrap(),
 	)
-	.execute_with(|| {
-		test(TestContext {
-			genesis,
-			total_validators,
-			validators,
-			addresses,
-		})
-	})
+	.execute_with(|| test(TestContext { genesis, total_validators, validators, addresses }))
 }
 
 /// Pruning strategy that keeps 10 headers behind best block.
diff --git a/polkadot/bridges/modules/ethereum/src/test_utils.rs b/polkadot/bridges/modules/ethereum/src/test_utils.rs
index 41161089ba6d07e0eb056a66890558df80fe8553..414445f3aaccb3652b90619ae6044bee538496a8 100644
--- a/polkadot/bridges/modules/ethereum/src/test_utils.rs
+++ b/polkadot/bridges/modules/ethereum/src/test_utils.rs
@@ -19,22 +19,22 @@
 //! Although the name implies that it is used by tests, it shouldn't be be used _directly_ by tests.
 //! Instead these utilities should be used by the Mock runtime, which in turn is used by tests.
 //!
-//! On the other hand, they may be used directly by the bechmarking module.
+//! On the other hand, they may be used directly by the benchmark module.
 
 // Since this is test code it's fine that not everything is used
 #![allow(dead_code)]
 
-use crate::finality::FinalityVotes;
-use crate::validators::CHANGE_EVENT_HASH;
-use crate::verification::calculate_score;
-use crate::{Config, HeaderToImport, Storage};
+use crate::{
+	finality::FinalityVotes, validators::CHANGE_EVENT_HASH, verification::calculate_score, Config,
+	HeaderToImport, Storage,
+};
 
 use bp_eth_poa::{
 	rlp_encode,
 	signatures::{secret_to_address, sign, SignHeader},
 	Address, AuraHeader, Bloom, Receipt, SealedEmptyStep, H256, U256,
 };
-use secp256k1::SecretKey;
+use libsecp256k1::SecretKey;
 use sp_std::prelude::*;
 
 /// Gas limit valid in test environment.
@@ -63,30 +63,28 @@ impl HeaderBuilder {
 	/// Creates default header on top of test parent with given hash.
 	#[cfg(test)]
 	pub fn with_parent_hash(parent_hash: H256) -> Self {
-		Self::with_parent_hash_on_runtime::<crate::mock::TestRuntime, crate::DefaultInstance>(parent_hash)
+		Self::with_parent_hash_on_runtime::<crate::mock::TestRuntime, ()>(parent_hash)
 	}
 
 	/// Creates default header on top of test parent with given number. First parent is selected.
 	#[cfg(test)]
 	pub fn with_parent_number(parent_number: u64) -> Self {
-		Self::with_parent_number_on_runtime::<crate::mock::TestRuntime, crate::DefaultInstance>(parent_number)
+		Self::with_parent_number_on_runtime::<crate::mock::TestRuntime, ()>(parent_number)
 	}
 
 	/// Creates default header on top of parent with given hash.
-	pub fn with_parent_hash_on_runtime<T: Config<I>, I: crate::Instance>(parent_hash: H256) -> Self {
+	pub fn with_parent_hash_on_runtime<T: Config<I>, I: 'static>(parent_hash: H256) -> Self {
 		use crate::Headers;
-		use frame_support::StorageMap;
 
 		let parent_header = Headers::<T, I>::get(&parent_hash).unwrap().header;
 		Self::with_parent(&parent_header)
 	}
 
 	/// Creates default header on top of parent with given number. First parent is selected.
-	pub fn with_parent_number_on_runtime<T: Config<I>, I: crate::Instance>(parent_number: u64) -> Self {
+	pub fn with_parent_number_on_runtime<T: Config<I>, I: 'static>(parent_number: u64) -> Self {
 		use crate::HeadersByNumber;
-		use frame_support::StorageMap;
 
-		let parent_hash = HeadersByNumber::<I>::get(parent_number).unwrap()[0];
+		let parent_hash = HeadersByNumber::<T, I>::get(parent_number).unwrap()[0];
 		Self::with_parent_hash_on_runtime::<T, I>(parent_hash)
 	}
 
@@ -132,10 +130,7 @@ impl HeaderBuilder {
 		let sealed_empty_steps = empty_steps
 			.iter()
 			.map(|(author, step)| {
-				let mut empty_step = SealedEmptyStep {
-					step: *step,
-					signature: Default::default(),
-				};
+				let mut empty_step = SealedEmptyStep { step: *step, signature: Default::default() };
 				let message = empty_step.message(&self.header.parent_hash);
 				let signature: [u8; 65] = sign(author, message).into();
 				empty_step.signature = signature.into();
@@ -218,7 +213,11 @@ pub fn build_genesis_header(author: &SecretKey) -> AuraHeader {
 }
 
 /// Helper function for building a custom child header which has been signed by an authority.
-pub fn build_custom_header<F>(author: &SecretKey, previous: &AuraHeader, customize_header: F) -> AuraHeader
+pub fn build_custom_header<F>(
+	author: &SecretKey,
+	previous: &AuraHeader,
+	customize_header: F,
+) -> AuraHeader
 where
 	F: FnOnce(AuraHeader) -> AuraHeader,
 {
@@ -234,7 +233,8 @@ pub fn insert_header<S: Storage>(storage: &mut S, header: AuraHeader) {
 	let id = header.compute_id();
 	let best_finalized = storage.finalized_block();
 	let import_context = storage.import_context(None, &header.parent_hash).unwrap();
-	let parent_finality_votes = storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false);
+	let parent_finality_votes =
+		storage.cached_finality_votes(&header.parent_id().unwrap(), &best_finalized, |_| false);
 	let finality_votes = crate::finality::prepare_votes(
 		parent_finality_votes,
 		best_finalized,
@@ -286,9 +286,10 @@ pub fn validators_change_receipt(parent_hash: H256) -> Receipt {
 			address: [3; 20].into(),
 			topics: vec![CHANGE_EVENT_HASH.into(), parent_hash],
 			data: vec![
-				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7,
-				7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+				0, 0, 0, 0, 0, 0, 0, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+				7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
 			],
 		}],
 	}
diff --git a/polkadot/bridges/modules/ethereum/src/validators.rs b/polkadot/bridges/modules/ethereum/src/validators.rs
index f9add9f2d80cf6b5cfc3f7a21ab95e79e2be5609..c38e8ddf5727c3b6d66d1876f075b94028375f1c 100644
--- a/polkadot/bridges/modules/ethereum/src/validators.rs
+++ b/polkadot/bridges/modules/ethereum/src/validators.rs
@@ -14,15 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::{ChangeToEnact, Storage};
+use crate::{error::Error, ChangeToEnact, Storage};
 use bp_eth_poa::{Address, AuraHeader, HeaderId, LogEntry, Receipt, U256};
 use sp_std::prelude::*;
 
 /// The hash of InitiateChange event of the validators set contract.
 pub(crate) const CHANGE_EVENT_HASH: &[u8; 32] = &[
-	0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f, 0xd2, 0xc2, 0x28,
-	0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89,
+	0x55, 0x25, 0x2f, 0xa6, 0xee, 0xe4, 0x74, 0x1b, 0x4e, 0x24, 0xa7, 0x4a, 0x70, 0xe9, 0xc1, 0x1f,
+	0xd2, 0xc2, 0x28, 0x1d, 0xf8, 0xd6, 0xea, 0x13, 0x12, 0x6f, 0xf8, 0x45, 0xf7, 0x82, 0x5c, 0x89,
 ];
 
 /// Where source of validators addresses come from. This covers the chain lifetime.
@@ -104,7 +103,8 @@ impl<'a> Validators<'a> {
 		if next_starts_at == header.number {
 			match *next_source {
 				ValidatorsSource::List(ref new_list) => return Ok((None, Some(new_list.clone()))),
-				ValidatorsSource::Contract(_, ref new_list) => return Ok((Some(new_list.clone()), None)),
+				ValidatorsSource::Contract(_, ref new_list) =>
+					return Ok((Some(new_list.clone()), None)),
 			}
 		}
 
@@ -128,12 +128,13 @@ impl<'a> Validators<'a> {
 		.bloom();
 
 		if !header.log_bloom.contains(&expected_bloom) {
-			return Ok((None, None));
+			return Ok((None, None))
 		}
 
 		let receipts = receipts.ok_or(Error::MissingTransactionsReceipts)?;
+		#[allow(clippy::question_mark)]
 		if header.check_receipts_root(&receipts).is_err() {
-			return Err(Error::TransactionsReceiptsMismatch);
+			return Err(Error::TransactionsReceiptsMismatch)
 		}
 
 		// iterate in reverse because only the _last_ change in a given
@@ -145,24 +146,24 @@ impl<'a> Validators<'a> {
 				.filter(|r| r.log_bloom.contains(&expected_bloom))
 				.flat_map(|r| r.logs.iter())
 				.filter(|l| {
-					l.address == *contract_address
-						&& l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH
-						&& l.topics[1] == header.parent_hash
+					l.address == *contract_address &&
+						l.topics.len() == 2 && l.topics[0].as_fixed_bytes() == CHANGE_EVENT_HASH &&
+						l.topics[1] == header.parent_hash
 				})
 				.filter_map(|l| {
 					let data_len = l.data.len();
 					if data_len < 64 {
-						return None;
+						return None
 					}
 
 					let new_validators_len_u256 = U256::from_big_endian(&l.data[32..64]);
 					let new_validators_len = new_validators_len_u256.low_u64();
 					if new_validators_len_u256 != new_validators_len.into() {
-						return None;
+						return None
 					}
 
 					if (data_len - 64) as u64 != new_validators_len.saturating_mul(32) {
-						return None;
+						return None
 					}
 
 					Some(
@@ -188,7 +189,10 @@ impl<'a> Validators<'a> {
 		finalized_blocks: &[(HeaderId, Option<S::Submitter>)],
 	) -> Option<ChangeToEnact> {
 		// if we haven't finalized any blocks, no changes may be finalized
-		let newest_finalized_id = finalized_blocks.last().map(|(id, _)| id)?;
+		let newest_finalized_id = match finalized_blocks.last().map(|(id, _)| id) {
+			Some(last_finalized_id) => last_finalized_id,
+			None => return None,
+		};
 		let oldest_finalized_id = finalized_blocks
 			.first()
 			.map(|(id, _)| id)
@@ -213,12 +217,10 @@ impl<'a> Validators<'a> {
 				}
 			})
 			.and_then(|signal_block| {
-				storage
-					.scheduled_change(&signal_block.hash)
-					.map(|change| ChangeToEnact {
-						signal_block: Some(signal_block),
-						validators: change.validators,
-					})
+				storage.scheduled_change(&signal_block.hash).map(|change| ChangeToEnact {
+					signal_block: Some(signal_block),
+					validators: change.validators,
+				})
 			})
 	}
 
@@ -240,7 +242,11 @@ impl<'a> Validators<'a> {
 	}
 
 	/// Returns source of validators that should author the next header.
-	fn source_at_next_header(&self, header_source_index: usize, header_number: u64) -> (u64, &ValidatorsSource) {
+	fn source_at_next_header(
+		&self,
+		header_source_index: usize,
+		header_number: u64,
+	) -> (u64, &ValidatorsSource) {
 		match self.config {
 			ValidatorsConfiguration::Single(ref source) => (0, source),
 			ValidatorsConfiguration::Multi(ref sources) => {
@@ -248,13 +254,13 @@ impl<'a> Validators<'a> {
 				if next_source_index < sources.len() {
 					let next_source = &sources[next_source_index];
 					if next_source.0 < header_number + 1 {
-						return (next_source.0, &next_source.1);
+						return (next_source.0, &next_source.1)
 					}
 				}
 
 				let source = &sources[header_source_index];
 				(source.0, &source.1)
-			}
+			},
 		}
 	}
 }
@@ -272,11 +278,11 @@ impl ValidatorsSource {
 #[cfg(test)]
 pub(crate) mod tests {
 	use super::*;
-	use crate::mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime};
-	use crate::DefaultInstance;
-	use crate::{AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader};
+	use crate::{
+		mock::{run_test, validators_addresses, validators_change_receipt, TestRuntime},
+		AuraScheduledChange, BridgeStorage, Headers, ScheduledChanges, StoredHeader,
+	};
 	use bp_eth_poa::compute_merkle_root;
-	use frame_support::StorageMap;
 
 	const TOTAL_VALIDATORS: usize = 3;
 
@@ -289,10 +295,7 @@ pub(crate) mod tests {
 		]);
 		let validators = Validators::new(&config);
 
-		assert_eq!(
-			validators.source_at(99),
-			(0, 0, &ValidatorsSource::List(vec![[1; 20].into()])),
-		);
+		assert_eq!(validators.source_at(99), (0, 0, &ValidatorsSource::List(vec![[1; 20].into()])),);
 		assert_eq!(
 			validators.source_at_next_header(0, 99),
 			(0, &ValidatorsSource::List(vec![[1; 20].into()])),
@@ -320,12 +323,12 @@ pub(crate) mod tests {
 	#[test]
 	fn maybe_signals_validators_change_works() {
 		// when contract is active, but bloom has no required bits set
-		let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new()));
+		let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+			Default::default(),
+			Vec::new(),
+		));
 		let validators = Validators::new(&config);
-		let mut header = AuraHeader {
-			number: u64::MAX,
-			..Default::default()
-		};
+		let mut header = AuraHeader { number: u64::max_value(), ..Default::default() };
 		assert!(!validators.maybe_signals_validators_change(&header));
 
 		// when contract is active and bloom has required bits set
@@ -346,10 +349,7 @@ pub(crate) mod tests {
 			(200, ValidatorsSource::Contract([3; 20].into(), vec![[3; 20].into()])),
 		]);
 		let validators = Validators::new(&config);
-		let mut header = AuraHeader {
-			number: 100,
-			..Default::default()
-		};
+		let mut header = AuraHeader { number: 100, ..Default::default() };
 
 		// when we're at the block that switches to list source
 		assert_eq!(
@@ -405,26 +405,20 @@ pub(crate) mod tests {
 
 	fn try_finalize_with_scheduled_change(scheduled_at: Option<HeaderId>) -> Option<ChangeToEnact> {
 		run_test(TOTAL_VALIDATORS, |_| {
-			let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new()));
+			let config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+				Default::default(),
+				Vec::new(),
+			));
 			let validators = Validators::new(&config);
 			let storage = BridgeStorage::<TestRuntime>::new();
 
 			// when we're finailizing blocks 10...100
-			let id10 = HeaderId {
-				number: 10,
-				hash: [10; 32].into(),
-			};
-			let id100 = HeaderId {
-				number: 100,
-				hash: [100; 32].into(),
-			};
+			let id10 = HeaderId { number: 10, hash: [10; 32].into() };
+			let id100 = HeaderId { number: 100, hash: [100; 32].into() };
 			let finalized_blocks = vec![(id10, None), (id100, None)];
 			let header100 = StoredHeader::<u64> {
 				submitter: None,
-				header: AuraHeader {
-					number: 100,
-					..Default::default()
-				},
+				header: AuraHeader { number: 100, ..Default::default() },
 				total_difficulty: 0.into(),
 				next_validators_set_id: 0,
 				last_signal_block: scheduled_at,
@@ -435,7 +429,7 @@ pub(crate) mod tests {
 			};
 			Headers::<TestRuntime>::insert(id100.hash, header100);
 			if let Some(scheduled_at) = scheduled_at {
-				ScheduledChanges::<DefaultInstance>::insert(scheduled_at.hash, scheduled_change);
+				ScheduledChanges::<TestRuntime, ()>::insert(scheduled_at.hash, scheduled_change);
 			}
 
 			validators.finalize_validators_change(&storage, &finalized_blocks)
@@ -444,16 +438,10 @@ pub(crate) mod tests {
 
 	#[test]
 	fn finalize_validators_change_finalizes_scheduled_change() {
-		let id50 = HeaderId {
-			number: 50,
-			..Default::default()
-		};
+		let id50 = HeaderId { number: 50, ..Default::default() };
 		assert_eq!(
 			try_finalize_with_scheduled_change(Some(id50)),
-			Some(ChangeToEnact {
-				signal_block: Some(id50),
-				validators: validators_addresses(1),
-			}),
+			Some(ChangeToEnact { signal_block: Some(id50), validators: validators_addresses(1) }),
 		);
 	}
 
@@ -464,10 +452,7 @@ pub(crate) mod tests {
 
 	#[test]
 	fn finalize_validators_change_does_not_finalize_changes_when_they_are_outside_of_range() {
-		let id5 = HeaderId {
-			number: 5,
-			..Default::default()
-		};
-		assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None);
+		let id5 = HeaderId { number: 5, ..Default::default() };
+		assert_eq!(try_finalize_with_scheduled_change(Some(id5)), None,);
 	}
 }
diff --git a/polkadot/bridges/modules/ethereum/src/verification.rs b/polkadot/bridges/modules/ethereum/src/verification.rs
index 68a17ff391de5500494ff29ac48ad50a140939f9..9c5cfcf3afe731a6a14b0012849f7dca86ad019f 100644
--- a/polkadot/bridges/modules/ethereum/src/verification.rs
+++ b/polkadot/bridges/modules/ethereum/src/verification.rs
@@ -14,11 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::error::Error;
-use crate::validators::{Validators, ValidatorsConfiguration};
-use crate::{AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage};
+use crate::{
+	error::Error,
+	validators::{Validators, ValidatorsConfiguration},
+	AuraConfiguration, AuraScheduledChange, ChainTime, ImportContext, PoolConfiguration, Storage,
+};
 use bp_eth_poa::{
-	public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep, H256, H520, U128, U256,
+	public_to_address, step_validator, Address, AuraHeader, HeaderId, Receipt, SealedEmptyStep,
+	H256, H520, U128, U256,
 };
 use codec::Encode;
 use sp_io::crypto::secp256k1_ecdsa_recover;
@@ -28,22 +31,25 @@ use sp_std::{vec, vec::Vec};
 /// Pre-check to see if should try and import this header.
 /// Returns error if we should not try to import this block.
 /// Returns ID of passed header and best finalized header.
-pub fn is_importable_header<S: Storage>(storage: &S, header: &AuraHeader) -> Result<(HeaderId, HeaderId), Error> {
+pub fn is_importable_header<S: Storage>(
+	storage: &S,
+	header: &AuraHeader,
+) -> Result<(HeaderId, HeaderId), Error> {
 	// we never import any header that competes with finalized header
 	let finalized_id = storage.finalized_block();
 	if header.number <= finalized_id.number {
-		return Err(Error::AncientHeader);
+		return Err(Error::AncientHeader)
 	}
 	// we never import any header with known hash
 	let id = header.compute_id();
 	if storage.header(&id.hash).is_some() {
-		return Err(Error::KnownHeader);
+		return Err(Error::KnownHeader)
 	}
 
 	Ok((id, finalized_id))
 }
 
-/// Try accept unsigned aura header into transaction pool.
+/// Try to accept unsigned aura header into transaction pool.
 ///
 /// Returns required and provided tags.
 pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
@@ -64,7 +70,8 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 	// we want to avoid having same headers twice in the pool
 	// => we're strict about receipts here - if we need them, we require receipts to be Some,
 	// otherwise we require receipts to be None
-	let receipts_required = Validators::new(validators_config).maybe_signals_validators_change(header);
+	let receipts_required =
+		Validators::new(validators_config).maybe_signals_validators_change(header);
 	match (receipts_required, receipts.is_some()) {
 		(true, false) => return Err(Error::MissingTransactionsReceipts),
 		(false, true) => return Err(Error::RedundantTransactionsReceipts),
@@ -78,7 +85,7 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 	let (best_id, _) = storage.best_block();
 	let difference = header.number.saturating_sub(best_id.number);
 	if difference > pool_config.max_future_number_difference {
-		return Err(Error::UnsignedTooFarInTheFuture);
+		return Err(Error::UnsignedTooFarInTheFuture)
 	}
 
 	// TODO: only accept new headers when we're at the tip of PoA chain
@@ -104,11 +111,8 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 
 			// since our parent is already in the storage, we do not require it
 			// to be in the transaction pool
-			(
-				vec![],
-				vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag],
-			)
-		}
+			(vec![], vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag])
+		},
 		None => {
 			// we know nothing about parent header
 			// => the best thing we can do is to believe that there are no forks in
@@ -119,34 +123,38 @@ pub fn accept_aura_header_into_pool<S: Storage, CT: ChainTime>(
 				"import context is None only when header is missing from the storage;\
 							best header is always in the storage; qed",
 			);
-			let validators_check_result =
-				validator_checks(config, &best_context.validators_set().validators, header, header_step);
+			let validators_check_result = validator_checks(
+				config,
+				&best_context.validators_set().validators,
+				header,
+				header_step,
+			);
 			if let Err(error) = validators_check_result {
-				find_next_validators_signal(storage, &best_context)
-					.ok_or(error)
-					.and_then(|next_validators| validator_checks(config, &next_validators, header, header_step))?;
+				find_next_validators_signal(storage, &best_context).ok_or(error).and_then(
+					|next_validators| {
+						validator_checks(config, &next_validators, header, header_step)
+					},
+				)?;
 			}
 
 			// since our parent is missing from the storage, we **DO** require it
 			// to be in the transaction pool
 			// (- 1 can't underflow because there's always best block in the header)
-			let requires_header_number_and_hash_tag = HeaderId {
-				number: header.number - 1,
-				hash: header.parent_hash,
-			}
-			.encode();
+			let requires_header_number_and_hash_tag =
+				HeaderId { number: header.number - 1, hash: header.parent_hash }.encode();
 			(
 				vec![requires_header_number_and_hash_tag],
 				vec![provides_number_and_authority_tag, provides_header_number_and_hash_tag],
 			)
-		}
+		},
 	};
 
 	// the heaviest, but rare operation - we do not want invalid receipts in the pool
 	if let Some(receipts) = receipts {
 		log::trace!(target: "runtime", "Got receipts! {:?}", receipts);
+		#[allow(clippy::question_mark)]
 		if header.check_receipts_root(receipts).is_err() {
-			return Err(Error::TransactionsReceiptsMismatch);
+			return Err(Error::TransactionsReceiptsMismatch)
 		}
 	}
 
@@ -189,32 +197,32 @@ fn contextless_checks<CT: ChainTime>(
 ) -> Result<(), Error> {
 	let expected_seal_fields = expected_header_seal_fields(config, header);
 	if header.seal.len() != expected_seal_fields {
-		return Err(Error::InvalidSealArity);
+		return Err(Error::InvalidSealArity)
 	}
-	if header.number >= u64::MAX {
-		return Err(Error::RidiculousNumber);
+	if header.number >= u64::max_value() {
+		return Err(Error::RidiculousNumber)
 	}
 	if header.gas_used > header.gas_limit {
-		return Err(Error::TooMuchGasUsed);
+		return Err(Error::TooMuchGasUsed)
 	}
 	if header.gas_limit < config.min_gas_limit {
-		return Err(Error::InvalidGasLimit);
+		return Err(Error::InvalidGasLimit)
 	}
 	if header.gas_limit > config.max_gas_limit {
-		return Err(Error::InvalidGasLimit);
+		return Err(Error::InvalidGasLimit)
 	}
 	if header.number != 0 && header.extra_data.len() as u64 > config.maximum_extra_data_size {
-		return Err(Error::ExtraDataOutOfBounds);
+		return Err(Error::ExtraDataOutOfBounds)
 	}
 
 	// we can't detect if block is from future in runtime
 	// => let's only do an overflow check
-	if header.timestamp > i32::MAX as u64 {
-		return Err(Error::TimestampOverflow);
+	if header.timestamp > i32::max_value() as u64 {
+		return Err(Error::TimestampOverflow)
 	}
 
 	if chain_time.is_timestamp_ahead(header.timestamp) {
-		return Err(Error::HeaderTimestampIsAhead);
+		return Err(Error::HeaderTimestampIsAhead)
 	}
 
 	Ok(())
@@ -233,15 +241,16 @@ fn contextual_checks<Submitter>(
 
 	// Ensure header is from the step after context.
 	if header_step == parent_step {
-		return Err(Error::DoubleVote);
+		return Err(Error::DoubleVote)
 	}
 	#[allow(clippy::suspicious_operation_groupings)]
 	if header.number >= config.validate_step_transition && header_step < parent_step {
-		return Err(Error::DoubleVote);
+		return Err(Error::DoubleVote)
 	}
 
-	// If empty step messages are enabled we will validate the messages in the seal, missing messages are not
-	// reported as there's no way to tell whether the empty step message was never sent or simply not included.
+	// If empty step messages are enabled we will validate the messages in the seal, missing
+	// messages are not reported as there's no way to tell whether the empty step message was never
+	// sent or simply not included.
 	let empty_steps_len = match header.number >= config.empty_steps_transition {
 		true => {
 			let strict_empty_steps = header.number >= config.strict_empty_steps_transition;
@@ -251,16 +260,16 @@ fn contextual_checks<Submitter>(
 
 			for empty_step in empty_steps {
 				if empty_step.step <= parent_step || empty_step.step >= header_step {
-					return Err(Error::InsufficientProof);
+					return Err(Error::InsufficientProof)
 				}
 
 				if !verify_empty_step(&header.parent_hash, &empty_step, validators) {
-					return Err(Error::InsufficientProof);
+					return Err(Error::InsufficientProof)
 				}
 
 				if strict_empty_steps {
 					if empty_step.step <= prev_empty_step {
-						return Err(Error::InsufficientProof);
+						return Err(Error::InsufficientProof)
 					}
 
 					prev_empty_step = empty_step.step;
@@ -268,7 +277,7 @@ fn contextual_checks<Submitter>(
 			}
 
 			empty_steps_len
-		}
+		},
 		false => 0,
 	};
 
@@ -276,7 +285,7 @@ fn contextual_checks<Submitter>(
 	if header.number >= config.validate_score_transition {
 		let expected_difficulty = calculate_score(parent_step, header_step, empty_steps_len as _);
 		if header.difficulty != expected_difficulty {
-			return Err(Error::InvalidDifficulty);
+			return Err(Error::InvalidDifficulty)
 		}
 	}
 
@@ -292,16 +301,17 @@ fn validator_checks(
 ) -> Result<(), Error> {
 	let expected_validator = *step_validator(validators, header_step);
 	if header.author != expected_validator {
-		return Err(Error::NotValidator);
+		return Err(Error::NotValidator)
 	}
 
 	let validator_signature = header.signature().ok_or(Error::MissingSignature)?;
 	let header_seal_hash = header
 		.seal_hash(header.number >= config.empty_steps_transition)
 		.ok_or(Error::MissingEmptySteps)?;
-	let is_invalid_proposer = !verify_signature(&expected_validator, &validator_signature, &header_seal_hash);
+	let is_invalid_proposer =
+		!verify_signature(&expected_validator, &validator_signature, &header_seal_hash);
 	if is_invalid_proposer {
-		return Err(Error::NotValidator);
+		return Err(Error::NotValidator)
 	}
 
 	Ok(())
@@ -323,9 +333,14 @@ fn verify_empty_step(parent_hash: &H256, step: &SealedEmptyStep, validators: &[A
 	verify_signature(&expected_validator, &step.signature, &message)
 }
 
-/// Chain scoring: total weight is sqrt(U256::MAX)*height - step
-pub(crate) fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 {
-	U256::from(U128::MAX) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps)
+/// Chain scoring: total `weight is sqrt(U256::max_value())*height - step`.
+pub(crate) fn calculate_score(
+	parent_step: u64,
+	current_step: u64,
+	current_empty_steps: usize,
+) -> U256 {
+	U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) +
+		U256::from(current_empty_steps)
 }
 
 /// Verify that the signature over message has been produced by given validator.
@@ -337,7 +352,10 @@ fn verify_signature(expected_validator: &Address, signature: &H520, message: &H2
 }
 
 /// Find next unfinalized validators set change after finalized set.
-fn find_next_validators_signal<S: Storage>(storage: &S, context: &ImportContext<S::Submitter>) -> Option<Vec<Address>> {
+fn find_next_validators_signal<S: Storage>(
+	storage: &S,
+	context: &ImportContext<S::Submitter>,
+) -> Option<Vec<Address>> {
 	// that's the earliest block number we may met in following loop
 	// it may be None if that's the first set
 	let best_set_signal_block = context.validators_set().signal_block;
@@ -352,14 +370,15 @@ fn find_next_validators_signal<S: Storage>(storage: &S, context: &ImportContext<
 		// next_current_block_hash points to the block that schedules next
 		// change
 		let current_scheduled_set = match current_set_signal_block {
-			Some(current_set_signal_block) if Some(&current_set_signal_block) == best_set_signal_block.as_ref() => {
-				return next_scheduled_set.map(|scheduled_set| scheduled_set.validators)
-			}
+			Some(current_set_signal_block)
+				if Some(&current_set_signal_block) == best_set_signal_block.as_ref() =>
+				return next_scheduled_set.map(|scheduled_set| scheduled_set.validators),
 			None => return next_scheduled_set.map(|scheduled_set| scheduled_set.validators),
-			Some(current_set_signal_block) => storage.scheduled_change(&current_set_signal_block.hash).expect(
-				"header that is associated with this change is not pruned;\
+			Some(current_set_signal_block) =>
+				storage.scheduled_change(&current_set_signal_block.hash).expect(
+					"header that is associated with this change is not pruned;\
 					scheduled changes are only removed when header is pruned; qed",
-			),
+				),
 		};
 
 		current_set_signal_block = current_scheduled_set.prev_signal_block;
@@ -370,20 +389,20 @@ fn find_next_validators_signal<S: Storage>(storage: &S, context: &ImportContext<
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{
-		insert_header, run_test_with_genesis, test_aura_config, validator, validator_address, validators_addresses,
-		validators_change_receipt, AccountId, ConstChainTime, HeaderBuilder, TestRuntime, GAS_LIMIT,
-	};
-	use crate::validators::ValidatorsSource;
-	use crate::DefaultInstance;
 	use crate::{
-		pool_configuration, BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId,
+		mock::{
+			insert_header, run_test_with_genesis, test_aura_config, validator, validator_address,
+			validators_addresses, validators_change_receipt, AccountId, ConstChainTime,
+			HeaderBuilder, TestRuntime, GAS_LIMIT,
+		},
+		pool_configuration,
+		validators::ValidatorsSource,
+		BridgeStorage, FinalizedBlock, Headers, HeadersByNumber, NextValidatorsSetId,
 		ScheduledChanges, ValidatorsSet, ValidatorsSets,
 	};
 	use bp_eth_poa::{compute_merkle_root, rlp_encode, TransactionOutcome, H520, U256};
-	use frame_support::{StorageMap, StorageValue};
 	use hex_literal::hex;
-	use secp256k1::SecretKey;
+	use libsecp256k1::SecretKey;
 	use sp_runtime::transaction_validity::TransactionTag;
 
 	const GENESIS_STEP: u64 = 42;
@@ -393,7 +412,10 @@ mod tests {
 		HeaderBuilder::genesis().step(GENESIS_STEP).sign_by(&validator(0))
 	}
 
-	fn verify_with_config(config: &AuraConfiguration, header: &AuraHeader) -> Result<ImportContext<AccountId>, Error> {
+	fn verify_with_config(
+		config: &AuraConfiguration,
+		header: &AuraHeader,
+	) -> Result<ImportContext<AccountId>, Error> {
 		run_test_with_genesis(genesis(), TOTAL_VALIDATORS, |_| {
 			let storage = BridgeStorage::<TestRuntime>::new();
 			verify_aura_header(&storage, config, None, header, &ConstChainTime::default())
@@ -418,10 +440,12 @@ mod tests {
 			let block3 = HeaderBuilder::with_parent_number(2).sign_by_set(&validators);
 			insert_header(&mut storage, block3);
 
-			FinalizedBlock::<DefaultInstance>::put(block2_id);
+			FinalizedBlock::<TestRuntime, ()>::put(block2_id);
 
-			let validators_config =
-				ValidatorsConfiguration::Single(ValidatorsSource::Contract(Default::default(), Vec::new()));
+			let validators_config = ValidatorsConfiguration::Single(ValidatorsSource::Contract(
+				Default::default(),
+				Vec::new(),
+			));
 			let (header, receipts) = make_header(&validators);
 			accept_aura_header_into_pool(
 				&storage,
@@ -435,22 +459,26 @@ mod tests {
 		})
 	}
 
-	fn change_validators_set_at(number: u64, finalized_set: Vec<Address>, signalled_set: Option<Vec<Address>>) {
-		let set_id = NextValidatorsSetId::<DefaultInstance>::get();
-		NextValidatorsSetId::<DefaultInstance>::put(set_id + 1);
-		ValidatorsSets::<DefaultInstance>::insert(
+	fn change_validators_set_at(
+		number: u64,
+		finalized_set: Vec<Address>,
+		signalled_set: Option<Vec<Address>>,
+	) {
+		let set_id = NextValidatorsSetId::<TestRuntime, ()>::get();
+		NextValidatorsSetId::<TestRuntime, ()>::put(set_id + 1);
+		ValidatorsSets::<TestRuntime, ()>::insert(
 			set_id,
 			ValidatorsSet {
 				validators: finalized_set,
 				signal_block: None,
 				enact_block: HeaderId {
 					number: 0,
-					hash: HeadersByNumber::<DefaultInstance>::get(&0).unwrap()[0],
+					hash: HeadersByNumber::<TestRuntime, ()>::get(&0).unwrap()[0],
 				},
 			},
 		);
 
-		let header_hash = HeadersByNumber::<DefaultInstance>::get(&number).unwrap()[0];
+		let header_hash = HeadersByNumber::<TestRuntime, ()>::get(&number).unwrap()[0];
 		let mut header = Headers::<TestRuntime>::get(&header_hash).unwrap();
 		header.next_validators_set_id = set_id;
 		if let Some(signalled_set) = signalled_set {
@@ -458,12 +486,9 @@ mod tests {
 				number: header.header.number - 1,
 				hash: header.header.parent_hash,
 			});
-			ScheduledChanges::<DefaultInstance>::insert(
+			ScheduledChanges::<TestRuntime, ()>::insert(
 				header.header.parent_hash,
-				AuraScheduledChange {
-					validators: signalled_set,
-					prev_signal_block: None,
-				},
+				AuraScheduledChange { validators: signalled_set, prev_signal_block: None },
 			);
 		}
 
@@ -522,21 +547,15 @@ mod tests {
 		config.max_gas_limit = 200.into();
 
 		// when limit is lower than expected
-		let header = HeaderBuilder::with_number(1)
-			.gas_limit(50.into())
-			.sign_by(&validator(0));
+		let header = HeaderBuilder::with_number(1).gas_limit(50.into()).sign_by(&validator(0));
 		assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
 
 		// when limit is larger than expected
-		let header = HeaderBuilder::with_number(1)
-			.gas_limit(250.into())
-			.sign_by(&validator(0));
+		let header = HeaderBuilder::with_number(1).gas_limit(250.into()).sign_by(&validator(0));
 		assert_eq!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
 
 		// when limit is within expected range
-		let header = HeaderBuilder::with_number(1)
-			.gas_limit(150.into())
-			.sign_by(&validator(0));
+		let header = HeaderBuilder::with_number(1).gas_limit(150.into()).sign_by(&validator(0));
 		assert_ne!(verify_with_config(&config, &header), Err(Error::InvalidGasLimit));
 	}
 
@@ -564,9 +583,8 @@ mod tests {
 		assert_eq!(default_verify(&header), Err(Error::TimestampOverflow));
 
 		// when timestamp doesn't overflow i32
-		let header = HeaderBuilder::with_number(1)
-			.timestamp(i32::MAX as u64)
-			.sign_by(&validator(0));
+		let header =
+			HeaderBuilder::with_number(1).timestamp(i32::MAX as u64).sign_by(&validator(0));
 		assert_ne!(default_verify(&header), Err(Error::TimestampOverflow));
 	}
 
@@ -575,7 +593,8 @@ mod tests {
 		// expected import context after verification
 		let expect = ImportContext::<AccountId> {
 			submitter: None,
-			parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(),
+			parent_hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3")
+				.into(),
 			parent_header: genesis(),
 			parent_total_difficulty: U256::zero(),
 			parent_scheduled_change: None,
@@ -589,7 +608,8 @@ mod tests {
 				signal_block: None,
 				enact_block: HeaderId {
 					number: 0,
-					hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3").into(),
+					hash: hex!("6e41bff05578fc1db17f6816117969b07d2217f1f9039d8116a82764335991d3")
+						.into(),
 				},
 			},
 			last_signal_block: None,
@@ -731,7 +751,10 @@ mod tests {
 	fn pool_verifies_known_blocks() {
 		// when header is known
 		assert_eq!(
-			default_accept_into_pool(|validators| (HeaderBuilder::with_parent_number(2).sign_by_set(validators), None)),
+			default_accept_into_pool(|validators| (
+				HeaderBuilder::with_parent_number(2).sign_by_set(validators),
+				None
+			)),
 			Err(Error::KnownHeader),
 		);
 	}
@@ -787,7 +810,10 @@ mod tests {
 	fn pool_verifies_future_block_number() {
 		// when header is too far from the future
 		assert_eq!(
-			default_accept_into_pool(|validators| (HeaderBuilder::with_number(100).sign_by_set(validators), None),),
+			default_accept_into_pool(|validators| (
+				HeaderBuilder::with_number(100).sign_by_set(validators),
+				None
+			),),
 			Err(Error::UnsignedTooFarInTheFuture),
 		);
 	}
@@ -813,7 +839,10 @@ mod tests {
 		// (even if header will be considered invalid/duplicate later, we can use this signature
 		// as a proof of malicious action by this validator)
 		assert_eq!(
-			default_accept_into_pool(|_| (HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)), None,)),
+			default_accept_into_pool(|_| (
+				HeaderBuilder::with_number(8).step(8).sign_by(&validator(1)),
+				None,
+			)),
 			Err(Error::NotValidator),
 		);
 	}
@@ -831,10 +860,7 @@ mod tests {
 				// no tags are required
 				vec![],
 				// header provides two tags
-				vec![
-					(4u64, validators_addresses(3)[1]).encode(),
-					(4u64, hash.unwrap()).encode(),
-				],
+				vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),],
 			)),
 		);
 	}
@@ -845,9 +871,8 @@ mod tests {
 		let mut parent_id = None;
 		assert_eq!(
 			default_accept_into_pool(|validators| {
-				let header = HeaderBuilder::with_number(5)
-					.step(GENESIS_STEP + 5)
-					.sign_by_set(validators);
+				let header =
+					HeaderBuilder::with_number(5).step(GENESIS_STEP + 5).sign_by_set(validators);
 				id = Some(header.compute_id());
 				parent_id = header.parent_id();
 				(header, None)
@@ -883,7 +908,11 @@ mod tests {
 		assert_eq!(
 			default_accept_into_pool(|actual_validators| {
 				// change finalized set at parent header + signal valid set at parent block
-				change_validators_set_at(3, validators_addresses(10), Some(validators_addresses(3)));
+				change_validators_set_at(
+					3,
+					validators_addresses(10),
+					Some(validators_addresses(3)),
+				);
 
 				// header is signed using wrong set
 				let header = HeaderBuilder::with_number(5)
@@ -935,10 +964,7 @@ mod tests {
 				// no tags are required
 				vec![],
 				// header provides two tags
-				vec![
-					(4u64, validators_addresses(3)[1]).encode(),
-					(4u64, hash.unwrap()).encode(),
-				],
+				vec![(4u64, validators_addresses(3)[1]).encode(), (4u64, hash.unwrap()).encode(),],
 			)),
 		);
 	}
diff --git a/polkadot/bridges/modules/grandpa/Cargo.toml b/polkadot/bridges/modules/grandpa/Cargo.toml
index 53f1916d62d9836b713d56e2e8ab9d3d35658b1e..01195abe89e16435dfd6cd7c9fce164e1c86103d 100644
--- a/polkadot/bridges/modules/grandpa/Cargo.toml
+++ b/polkadot/bridges/modules/grandpa/Cargo.toml
@@ -8,8 +8,8 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
-finality-grandpa = { version = "0.14.4", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
+finality-grandpa = { version = "0.14.0", default-features = false }
 log = { version = "0.4.14", default-features = false }
 num-traits = { version = "0.2", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
@@ -22,18 +22,19 @@ bp-header-chain = { path = "../../primitives/header-chain", default-features = f
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 # Optional Benchmarking Dependencies
 bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true }
-frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
 
 [dev-dependencies]
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" }
 
 [features]
diff --git a/polkadot/bridges/modules/grandpa/src/benchmarking.rs b/polkadot/bridges/modules/grandpa/src/benchmarking.rs
index bc027e86a4b59863b216bc87b6c31b2c40de66ac..46e1e41a87028bd18dd7ce03a3862e00cd518d6f 100644
--- a/polkadot/bridges/modules/grandpa/src/benchmarking.rs
+++ b/polkadot/bridges/modules/grandpa/src/benchmarking.rs
@@ -23,7 +23,7 @@
 //! 2. The number of `pre-commits` in the justification
 //!
 //! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where
-//! `header_of_chain` is a decendant of `finality_target`.
+//! `header_of_chain` is a descendant of `finality_target`.
 //!
 //! Pre-commits are messages which are signed by validators at the head of the chain they think is
 //! the best.
@@ -34,7 +34,7 @@
 //! [A] <- [B] <- [C]
 //!
 //! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to
-//! verify this we will have vote ancestries of [B, C, B', C'] and pre-commits [C, C'].
+//! verify this we will have vote ancestries of `[B, C, B', C']` and pre-commits `[C, C']`.
 //!
 //! Note that the worst case scenario here would be a justification where each validator has it's
 //! own fork which is `SESSION_LENGTH` blocks long.
@@ -42,7 +42,8 @@
 use crate::*;
 
 use bp_test_utils::{
-	accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, TEST_GRANDPA_SET_ID,
+	accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND,
+	TEST_GRANDPA_SET_ID,
 };
 use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller};
 use frame_support::traits::Get;
@@ -63,7 +64,7 @@ const MAX_VALIDATOR_SET_SIZE: u32 = 1024;
 
 /// Returns number of first header to be imported.
 ///
-/// Since we boostrap the pallet with `HeadersToKeep` already imported headers,
+/// Since we bootstrap the pallet with `HeadersToKeep` already imported headers,
 /// this function computes the next expected header number to import.
 fn header_number<T: Config<I>, I: 'static, N: From<u32>>() -> N {
 	(T::HeadersToKeep::get() + 1).into()
@@ -80,7 +81,7 @@ fn prepare_benchmark_data<T: Config<I>, I: 'static>(
 		.collect::<Vec<_>>();
 
 	let init_data = InitializationData {
-		header: bp_test_utils::test_header(Zero::zero()),
+		header: Box::new(bp_test_utils::test_header(Zero::zero())),
 		authority_list,
 		set_id: TEST_GRANDPA_SET_ID,
 		is_halted: false,
@@ -109,7 +110,7 @@ benchmarks_instance_pallet! {
 		let v in 1..MAX_VOTE_ANCESTRIES;
 		let caller: T::AccountId = whitelisted_caller();
 		let (header, justification) = prepare_benchmark_data::<T, I>(p, v);
-	}: submit_finality_proof(RawOrigin::Signed(caller), header, justification)
+	}: submit_finality_proof(RawOrigin::Signed(caller), Box::new(header), justification)
 	verify {
 		let header: BridgedHeader<T, I> = bp_test_utils::test_header(header_number::<T, I, _>());
 		let expected_hash = header.hash();
diff --git a/polkadot/bridges/modules/grandpa/src/lib.rs b/polkadot/bridges/modules/grandpa/src/lib.rs
index 700df5b8469099af516d8c6d3ca4851fb2d9fcd3..4c1b16b7c2c5db8669ecf72817a66a0de387628a 100644
--- a/polkadot/bridges/modules/grandpa/src/lib.rs
+++ b/polkadot/bridges/modules/grandpa/src/lib.rs
@@ -28,7 +28,7 @@
 //!
 //! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only
 //! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe
-//! bug causing resulting in an equivocation. Such events are outside of the scope of this pallet.
+//! bug resulting in an equivocation. Such events are outside the scope of this pallet.
 //! Shall the fork occur on the bridged chain governance intervention will be required to
 //! re-initialize the bridge and track the right fork.
 
@@ -38,15 +38,14 @@
 
 use crate::weights::WeightInfo;
 
-use bp_header_chain::justification::GrandpaJustification;
-use bp_header_chain::InitializationData;
+use bp_header_chain::{justification::GrandpaJustification, InitializationData};
 use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf};
 use finality_grandpa::voter_set::VoterSet;
 use frame_support::{ensure, fail};
 use frame_system::{ensure_signed, RawOrigin};
 use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID};
 use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero};
-use sp_std::convert::TryInto;
+use sp_std::{boxed::Box, convert::TryInto};
 
 #[cfg(test)]
 mod mock;
@@ -130,16 +129,13 @@ pub mod pallet {
 		))]
 		pub fn submit_finality_proof(
 			origin: OriginFor<T>,
-			finality_target: BridgedHeader<T, I>,
+			finality_target: Box<BridgedHeader<T, I>>,
 			justification: GrandpaJustification<BridgedHeader<T, I>>,
 		) -> DispatchResultWithPostInfo {
 			ensure_operational::<T, I>()?;
 			let _ = ensure_signed(origin)?;
 
-			ensure!(
-				Self::request_count() < T::MaxRequests::get(),
-				<Error<T, I>>::TooManyRequests
-			);
+			ensure!(Self::request_count() < T::MaxRequests::get(), <Error<T, I>>::TooManyRequests);
 
 			let (hash, number) = (finality_target.hash(), finality_target.number());
 			log::trace!(target: "runtime::bridge-grandpa", "Going to try and finalize header {:?}", finality_target);
@@ -153,30 +149,40 @@ pub mod pallet {
 						finality_target,
 					);
 					fail!(<Error<T, I>>::NotInitialized);
-				}
+				},
 			};
 
 			// We do a quick check here to ensure that our header chain is making progress and isn't
-			// "travelling back in time" (which could be indicative of something bad, e.g a hard-fork).
+			// "travelling back in time" (which could be indicative of something bad, e.g. a
+			// hard-fork).
 			ensure!(best_finalized.number() < number, <Error<T, I>>::OldHeader);
 
 			let authority_set = <CurrentAuthoritySet<T, I>>::get();
 			let set_id = authority_set.set_id;
 			verify_justification::<T, I>(&justification, hash, *number, authority_set)?;
 
-			let _enacted = try_enact_authority_change::<T, I>(&finality_target, set_id)?;
+			let is_authorities_change_enacted =
+				try_enact_authority_change::<T, I>(&finality_target, set_id)?;
 			<RequestCount<T, I>>::mutate(|count| *count += 1);
-			insert_header::<T, I>(finality_target, hash);
+			insert_header::<T, I>(*finality_target, hash);
 			log::info!(target: "runtime::bridge-grandpa", "Succesfully imported finalized header with hash {:?}!", hash);
 
-			Ok(().into())
+			// A mandatory header is a header that changes the authority set. The pallet can't go
+			// further without importing this header. So every bridge MUST import mandatory headers.
+			//
+			// We don't want to charge extra costs for mandatory operations. So relayer is not
+			// paying fee for mandatory headers import transactions.
+			let is_mandatory_header = is_authorities_change_enacted;
+			let pays_fee = if is_mandatory_header { Pays::No } else { Pays::Yes };
+
+			Ok(pays_fee.into())
 		}
 
 		/// Bootstrap the bridge pallet with an initial header and authority set from which to sync.
 		///
 		/// The initial configuration provided does not need to be the genesis header of the bridged
-		/// chain, it can be any arbirary header. You can also provide the next scheduled set change
-		/// if it is already know.
+		/// chain, it can be any arbitrary header. You can also provide the next scheduled set
+		/// change if it is already known.
 		///
 		/// This function is only allowed to be called from a trusted origin and writes to storage
 		/// with practically no checks in terms of the validity of the data. It is important that
@@ -205,17 +211,20 @@ pub mod pallet {
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
-		pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResultWithPostInfo {
+		pub fn set_owner(
+			origin: OriginFor<T>,
+			new_owner: Option<T::AccountId>,
+		) -> DispatchResultWithPostInfo {
 			ensure_owner_or_root::<T, I>(origin)?;
 			match new_owner {
 				Some(new_owner) => {
 					PalletOwner::<T, I>::put(&new_owner);
 					log::info!(target: "runtime::bridge-grandpa", "Setting pallet Owner to: {:?}", new_owner);
-				}
+				},
 				None => {
 					PalletOwner::<T, I>::kill();
 					log::info!(target: "runtime::bridge-grandpa", "Removed Owner of pallet.");
-				}
+				},
 			}
 
 			Ok(().into())
@@ -225,7 +234,10 @@ pub mod pallet {
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
-		pub fn set_operational(origin: OriginFor<T>, operational: bool) -> DispatchResultWithPostInfo {
+		pub fn set_operational(
+			origin: OriginFor<T>,
+			operational: bool,
+		) -> DispatchResultWithPostInfo {
 			ensure_owner_or_root::<T, I>(origin)?;
 			<IsHalted<T, I>>::put(operational);
 
@@ -252,11 +264,13 @@ pub mod pallet {
 
 	/// Hash of the header used to bootstrap the pallet.
 	#[pallet::storage]
-	pub(super) type InitialHash<T: Config<I>, I: 'static = ()> = StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
+	pub(super) type InitialHash<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
 
 	/// Hash of the best finalized header.
 	#[pallet::storage]
-	pub(super) type BestFinalized<T: Config<I>, I: 'static = ()> = StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
+	pub(super) type BestFinalized<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, BridgedBlockHash<T, I>, ValueQuery>;
 
 	/// A ring buffer of imported hashes. Ordered by the insertion time.
 	#[pallet::storage]
@@ -265,7 +279,8 @@ pub mod pallet {
 
 	/// Current ring buffer position.
 	#[pallet::storage]
-	pub(super) type ImportedHashesPointer<T: Config<I>, I: 'static = ()> = StorageValue<_, u32, ValueQuery>;
+	pub(super) type ImportedHashesPointer<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, u32, ValueQuery>;
 
 	/// Headers which have been imported into the pallet.
 	#[pallet::storage]
@@ -284,7 +299,8 @@ pub mod pallet {
 	/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
 	/// flag directly or call the `halt_operations`).
 	#[pallet::storage]
-	pub(super) type PalletOwner<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId, OptionQuery>;
+	pub(super) type PalletOwner<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, T::AccountId, OptionQuery>;
 
 	/// If true, all pallet transactions are failed immediately.
 	#[pallet::storage]
@@ -301,10 +317,7 @@ pub mod pallet {
 	#[cfg(feature = "std")]
 	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
 		fn default() -> Self {
-			Self {
-				owner: None,
-				init_data: None,
-			}
+			Self { owner: None, init_data: None }
 		}
 	}
 
@@ -355,7 +368,7 @@ pub mod pallet {
 	/// is found it will be enacted immediately.
 	///
 	/// This function does not support forced changes, or scheduled changes with delays
-	/// since these types of changes are indicitive of abnormal behaviour from GRANDPA.
+	/// since these types of changes are indicative of abnormal behavior from GRANDPA.
 	///
 	/// Returned value will indicate if a change was enacted or not.
 	pub(crate) fn try_enact_authority_change<T: Config<I>, I: 'static>(
@@ -401,7 +414,7 @@ pub mod pallet {
 	///
 	/// Will use the GRANDPA current authorities known to the pallet.
 	///
-	/// If succesful it returns the decoded GRANDPA justification so we can refund any weight which
+	/// If successful it returns the decoded GRANDPA justification so we can refund any weight which
 	/// was overcharged in the initial call.
 	pub(crate) fn verify_justification<T: Config<I>, I: 'static>(
 		justification: &GrandpaJustification<BridgedHeader<T, I>>,
@@ -411,29 +424,35 @@ pub mod pallet {
 	) -> Result<(), sp_runtime::DispatchError> {
 		use bp_header_chain::justification::verify_justification;
 
-		let voter_set = VoterSet::new(authority_set.authorities).ok_or(<Error<T, I>>::InvalidAuthoritySet)?;
+		let voter_set =
+			VoterSet::new(authority_set.authorities).ok_or(<Error<T, I>>::InvalidAuthoritySet)?;
 		let set_id = authority_set.set_id;
 
-		Ok(
-			verify_justification::<BridgedHeader<T, I>>((hash, number), set_id, &voter_set, justification).map_err(
-				|e| {
-					log::error!(
-						target: "runtime::bridge-grandpa",
-						"Received invalid justification for {:?}: {:?}",
-						hash,
-						e,
-					);
-					<Error<T, I>>::InvalidJustification
-				},
-			)?,
+		Ok(verify_justification::<BridgedHeader<T, I>>(
+			(hash, number),
+			set_id,
+			&voter_set,
+			justification,
 		)
+		.map_err(|e| {
+			log::error!(
+				target: "runtime::bridge-grandpa",
+				"Received invalid justification for {:?}: {:?}",
+				hash,
+				e,
+			);
+			<Error<T, I>>::InvalidJustification
+		})?)
 	}
 
 	/// Import a previously verified header to the storage.
 	///
 	/// Note this function solely takes care of updating the storage and pruning old entries,
-	/// but does not verify the validaty of such import.
-	pub(crate) fn insert_header<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>, hash: BridgedBlockHash<T, I>) {
+	/// but does not verify the validity of such import.
+	pub(crate) fn insert_header<T: Config<I>, I: 'static>(
+		header: BridgedHeader<T, I>,
+		hash: BridgedBlockHash<T, I>,
+	) {
 		let index = <ImportedHashesPointer<T, I>>::get();
 		let pruning = <ImportedHashes<T, I>>::try_get(index);
 		<BestFinalized<T, I>>::put(hash);
@@ -453,17 +472,12 @@ pub mod pallet {
 	pub(crate) fn initialize_bridge<T: Config<I>, I: 'static>(
 		init_params: super::InitializationData<BridgedHeader<T, I>>,
 	) {
-		let super::InitializationData {
-			header,
-			authority_list,
-			set_id,
-			is_halted,
-		} = init_params;
+		let super::InitializationData { header, authority_list, set_id, is_halted } = init_params;
 
 		let initial_hash = header.hash();
 		<InitialHash<T, I>>::put(initial_hash);
 		<ImportedHashesPointer<T, I>>::put(0);
-		insert_header::<T, I>(header, initial_hash);
+		insert_header::<T, I>(*header, initial_hash);
 
 		let authority_set = bp_header_chain::AuthoritySet::new(authority_list, set_id);
 		<CurrentAuthoritySet<T, I>>::put(authority_set);
@@ -498,7 +512,9 @@ pub mod pallet {
 	fn ensure_owner_or_root<T: Config<I>, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> {
 		match origin.into() {
 			Ok(RawOrigin::Root) => Ok(()),
-			Ok(RawOrigin::Signed(ref signer)) if Some(signer) == <PalletOwner<T, I>>::get().as_ref() => Ok(()),
+			Ok(RawOrigin::Signed(ref signer))
+				if Some(signer) == <PalletOwner<T, I>>::get().as_ref() =>
+				Ok(()),
 			_ => Err(BadOrigin),
 		}
 	}
@@ -545,14 +561,17 @@ impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		parse: impl FnOnce(bp_runtime::StorageProofChecker<BridgedBlockHasher<T, I>>) -> R,
 	) -> Result<R, sp_runtime::DispatchError> {
 		let header = <ImportedHeaders<T, I>>::get(hash).ok_or(Error::<T, I>::UnknownHeader)?;
-		let storage_proof_checker = bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof)
-			.map_err(|_| Error::<T, I>::StorageRootMismatch)?;
+		let storage_proof_checker =
+			bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof)
+				.map_err(|_| Error::<T, I>::StorageRootMismatch)?;
 
 		Ok(parse(storage_proof_checker))
 	}
 }
 
-pub(crate) fn find_scheduled_change<H: HeaderT>(header: &H) -> Option<sp_finality_grandpa::ScheduledChange<H::Number>> {
+pub(crate) fn find_scheduled_change<H: HeaderT>(
+	header: &H,
+) -> Option<sp_finality_grandpa::ScheduledChange<H::Number>> {
 	use sp_runtime::generic::OpaqueDigestItemId;
 
 	let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);
@@ -567,7 +586,7 @@ pub(crate) fn find_scheduled_change<H: HeaderT>(header: &H) -> Option<sp_finalit
 	header.digest().convert_first(|l| l.try_to(id).and_then(filter_log))
 }
 
-/// Checks the given header for a consensus digest signalling a **forced** scheduled change and
+/// Checks the given header for a consensus digest signaling a **forced** scheduled change and
 /// extracts it.
 pub(crate) fn find_forced_change<H: HeaderT>(
 	header: &H,
@@ -590,8 +609,9 @@ pub(crate) fn find_forced_change<H: HeaderT>(
 #[cfg(feature = "runtime-benchmarks")]
 pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader<T, I>) {
 	initialize_bridge::<T, I>(InitializationData {
-		header,
-		authority_list: sp_std::vec::Vec::new(), // we don't verify any proofs in external benchmarks
+		header: Box::new(header),
+		authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external
+		                                          * benchmarks */
 		set_id: 0,
 		is_halted: false,
 	});
@@ -600,14 +620,15 @@ pub fn initialize_for_benchmarks<T: Config<I>, I: 'static>(header: BridgedHeader
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::mock::{run_test, test_header, Origin, TestHeader, TestNumber, TestRuntime};
+	use crate::mock::{
+		run_test, test_header, Origin, TestHash, TestHeader, TestNumber, TestRuntime,
+	};
 	use bp_test_utils::{
-		authority_list, make_default_justification, make_justification_for_header, JustificationGeneratorParams, ALICE,
-		BOB,
+		authority_list, make_default_justification, make_justification_for_header,
+		JustificationGeneratorParams, ALICE, BOB,
 	};
 	use codec::Encode;
-	use frame_support::weights::PostDispatchInfo;
-	use frame_support::{assert_err, assert_noop, assert_ok};
+	use frame_support::{assert_err, assert_noop, assert_ok, weights::PostDispatchInfo};
 	use sp_runtime::{Digest, DigestItem, DispatchError};
 
 	fn initialize_substrate_bridge() {
@@ -616,11 +637,14 @@ mod tests {
 
 	fn init_with_origin(
 		origin: Origin,
-	) -> Result<InitializationData<TestHeader>, sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>> {
+	) -> Result<
+		InitializationData<TestHeader>,
+		sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>,
+	> {
 		let genesis = test_header(0);
 
 		let init_data = InitializationData {
-			header: genesis,
+			header: Box::new(genesis),
 			authority_list: authority_list(),
 			set_id: 1,
 			is_halted: false,
@@ -633,7 +657,11 @@ mod tests {
 		let header = test_header(header.into());
 		let justification = make_default_justification(&header);
 
-		Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, justification)
+		Pallet::<TestRuntime>::submit_finality_proof(
+			Origin::signed(1),
+			Box::new(header),
+			justification,
+		)
 	}
 
 	fn next_block() {
@@ -644,15 +672,14 @@ mod tests {
 		let _ = Pallet::<TestRuntime>::on_initialize(current_number);
 	}
 
-	fn change_log(delay: u64) -> Digest {
-		let consensus_log = ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
-			next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
-			delay,
-		});
+	fn change_log(delay: u64) -> Digest<TestHash> {
+		let consensus_log =
+			ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
+				next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)],
+				delay,
+			});
 
-		Digest {
-			logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())],
-		}
+		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
 	}
 
 	fn forced_change_log(delay: u64) -> Digest {
@@ -664,9 +691,7 @@ mod tests {
 			},
 		);
 
-		Digest {
-			logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())],
-		}
+		Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }
 	}
 
 	#[test]
@@ -792,7 +817,13 @@ mod tests {
 	fn succesfully_imports_header_with_valid_finality() {
 		run_test(|| {
 			initialize_substrate_bridge();
-			assert_ok!(submit_finality_proof(1));
+			assert_ok!(
+				submit_finality_proof(1),
+				PostDispatchInfo {
+					actual_weight: None,
+					pays_fee: frame_support::weights::Pays::Yes,
+				},
+			);
 
 			let header = test_header(1);
 			assert_eq!(<BestFinalized<TestRuntime>>::get(), header.hash());
@@ -807,14 +838,16 @@ mod tests {
 
 			let header = test_header(1);
 
-			let params = JustificationGeneratorParams::<TestHeader> {
-				set_id: 2,
-				..Default::default()
-			};
+			let params =
+				JustificationGeneratorParams::<TestHeader> { set_id: 2, ..Default::default() };
 			let justification = make_justification_for_header(params);
 
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, justification,),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification,
+				),
 				<Error<TestRuntime>>::InvalidJustification
 			);
 		})
@@ -830,7 +863,11 @@ mod tests {
 			justification.round = 42;
 
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, justification,),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification,
+				),
 				<Error<TestRuntime>>::InvalidJustification
 			);
 		})
@@ -843,7 +880,7 @@ mod tests {
 
 			let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)];
 			let init_data = InitializationData {
-				header: genesis,
+				header: Box::new(genesis),
 				authority_list: invalid_authority_list,
 				set_id: 1,
 				is_halted: false,
@@ -855,7 +892,11 @@ mod tests {
 			let justification = make_default_justification(&header);
 
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, justification,),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification,
+				),
 				<Error<TestRuntime>>::InvalidAuthoritySet
 			);
 		})
@@ -889,11 +930,17 @@ mod tests {
 			let justification = make_default_justification(&header);
 
 			// Let's import our test header
-			assert_ok!(Pallet::<TestRuntime>::submit_finality_proof(
-				Origin::signed(1),
-				header.clone(),
-				justification
-			));
+			assert_ok!(
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header.clone()),
+					justification
+				),
+				PostDispatchInfo {
+					actual_weight: None,
+					pays_fee: frame_support::weights::Pays::No,
+				},
+			);
 
 			// Make sure that our header is the best finalized
 			assert_eq!(<BestFinalized<TestRuntime>>::get(), header.hash());
@@ -922,7 +969,11 @@ mod tests {
 
 			// Should not be allowed to import this header
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, justification),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification
+				),
 				<Error<TestRuntime>>::UnsupportedScheduledChange
 			);
 		})
@@ -943,7 +994,11 @@ mod tests {
 
 			// Should not be allowed to import this header
 			assert_err!(
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, justification),
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					justification
+				),
 				<Error<TestRuntime>>::UnsupportedScheduledChange
 			);
 		})
@@ -1001,7 +1056,11 @@ mod tests {
 				let mut invalid_justification = make_default_justification(&header);
 				invalid_justification.round = 42;
 
-				Pallet::<TestRuntime>::submit_finality_proof(Origin::signed(1), header, invalid_justification)
+				Pallet::<TestRuntime>::submit_finality_proof(
+					Origin::signed(1),
+					Box::new(header),
+					invalid_justification,
+				)
 			};
 
 			initialize_substrate_bridge();
diff --git a/polkadot/bridges/modules/grandpa/src/mock.rs b/polkadot/bridges/modules/grandpa/src/mock.rs
index 8851dbb041a677ede0137226c9186be42af4e4c0..f8b5e269323f933cb878ca82964cb1e181c4a681 100644
--- a/polkadot/bridges/modules/grandpa/src/mock.rs
+++ b/polkadot/bridges/modules/grandpa/src/mock.rs
@@ -19,6 +19,7 @@
 
 use bp_runtime::Chain;
 use frame_support::{construct_runtime, parameter_types, weights::Weight};
+use sp_core::sr25519::Signature;
 use sp_runtime::{
 	testing::{Header, H256},
 	traits::{BlakeTwo256, IdentityLookup},
@@ -100,6 +101,11 @@ impl Chain for TestBridgedChain {
 	type Hash = <TestRuntime as frame_system::Config>::Hash;
 	type Hasher = <TestRuntime as frame_system::Config>::Hashing;
 	type Header = <TestRuntime as frame_system::Config>::Header;
+
+	type AccountId = AccountId;
+	type Balance = u64;
+	type Index = u64;
+	type Signature = Signature;
 }
 
 pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
diff --git a/polkadot/bridges/modules/grandpa/src/weights.rs b/polkadot/bridges/modules/grandpa/src/weights.rs
index 18d88049f16a8b9dcb68edc5efde6aef4e32c160..c0cce2c5258d126246c14c3a228bca3354b074a7 100644
--- a/polkadot/bridges/modules/grandpa/src/weights.rs
+++ b/polkadot/bridges/modules/grandpa/src/weights.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-//! Autogenerated weights for pallet_bridge_grandpa
+//! Autogenerated weights for `pallet_bridge_grandpa`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
 //! DATE: 2021-06-03, STEPS: [50, ], REPEAT: 20
@@ -46,12 +46,12 @@ use frame_support::{
 };
 use sp_std::marker::PhantomData;
 
-/// Weight functions needed for pallet_bridge_grandpa.
+/// Weight functions needed for `pallet_bridge_grandpa`.
 pub trait WeightInfo {
 	fn submit_finality_proof(p: u32, v: u32) -> Weight;
 }
 
-/// Weights for pallet_bridge_grandpa using the Rialto node and recommended hardware.
+/// Weights for `pallet_bridge_grandpa` using the Rialto node and recommended hardware.
 pub struct RialtoWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
 	fn submit_finality_proof(p: u32, v: u32) -> Weight {
diff --git a/polkadot/bridges/modules/messages/Cargo.toml b/polkadot/bridges/modules/messages/Cargo.toml
index 10577c1406447962a52bcf23582e9d907b0b0d3b..b48bdc5c0ffd092838ec00bc21bac7b3ad880a57 100644
--- a/polkadot/bridges/modules/messages/Cargo.toml
+++ b/polkadot/bridges/modules/messages/Cargo.toml
@@ -8,7 +8,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
 bitvec = { version = "0.20", default-features = false, features = ["alloc"] }
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 log = { version = "0.4.14", default-features = false }
 num-traits = { version = "0.2", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
@@ -18,17 +18,16 @@ serde = { version = "1.0.101", optional = true, features = ["derive"] }
 
 bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false }
 bp-messages = { path = "../../primitives/messages", default-features = false }
-bp-rialto = { path = "../../primitives/chain-rialto", default-features = false }
 bp-runtime = { path = "../../primitives/runtime", default-features = false }
 
 # Substrate Dependencies
 
-frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false, optional = true }
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
 hex = "0.4"
@@ -42,7 +41,6 @@ std = [
 	"bp-message-dispatch/std",
 	"bp-messages/std",
 	"bp-runtime/std",
-	"bp-rialto/std",
 	"codec/std",
 	"frame-support/std",
 	"frame-system/std",
diff --git a/polkadot/bridges/modules/messages/README.md b/polkadot/bridges/modules/messages/README.md
index be25b3c37f757a289719d925d138405fc32782ea..062a966fad70a3ccfef72f93a67884ab76e8a535 100644
--- a/polkadot/bridges/modules/messages/README.md
+++ b/polkadot/bridges/modules/messages/README.md
@@ -354,7 +354,7 @@ Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_corr
 `pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every
 runtime's tests.
 
-### Post-dispatch weight refunds of the `receive_messages_proof` call
+#### Post-dispatch weight refunds of the `receive_messages_proof` call
 
 Weight formula of the `receive_messages_proof` call assumes that the dispatch fee of every message is
 paid at the target chain (where call is executed), that every message will be dispatched and that
@@ -388,6 +388,7 @@ The weight formula is:
 Weight = BaseWeight + MessagesCount * MessageConfirmationWeight
        + RelayersCount * RelayerRewardWeight
        + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight
+       + MessagesCount * (DbReadWeight + DbWriteWeight)
 ```
 
 Where:
@@ -403,6 +404,15 @@ Where:
 | `ExpectedProofSize`       | `EXTRA_STORAGE_PROOF_SIZE`                                                                                            | Size of proof that we are expecting                                                                                                                                                                     |
 | `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)`                              | Weight of processing every additional proof byte over `ExpectedProofSize` limit. We're using the same formula, as for message delivery, because proof mechanism is assumed to be the same in both cases |
 
+#### Post-dispatch weight refunds of the `receive_messages_delivery_proof` call
+
+Weight formula of the `receive_messages_delivery_proof` call assumes that all messages in the proof
+are actually delivered (so there are no already confirmed messages) and every message is processed
+by the `OnDeliveryConfirmed` callback. This means that for every message, we're adding a single db read
+weight and a single db write weight. If, for some reason, messages are not processed by the
+`OnDeliveryConfirmed` callback, or their processing is faster than that additional weight, the
+difference is refunded to the submitter.
+
 #### Why we're always able to craft `receive_messages_delivery_proof` transaction?
 
 There can be at most `<PeerRuntime as pallet_bridge_messages::Config>::MaxUnconfirmedMessagesAtInboundLane`
diff --git a/polkadot/bridges/modules/messages/src/benchmarking.rs b/polkadot/bridges/modules/messages/src/benchmarking.rs
index 54cb7c26cd3d76fc2fd1a76bc705b8e8d03af115..788ccc070310ea288f264b082798209fdbb5da3c 100644
--- a/polkadot/bridges/modules/messages/src/benchmarking.rs
+++ b/polkadot/bridges/modules/messages/src/benchmarking.rs
@@ -16,18 +16,18 @@
 
 //! Messages pallet benchmarking.
 
-use crate::weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH;
 use crate::{
-	inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, outbound_lane::ReceivalConfirmationResult,
-	Call, Instance,
+	inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane,
+	outbound_lane::ReceivalConfirmationResult, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, Call,
 };
 
 use bp_messages::{
-	source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, InboundLaneData, LaneId,
-	MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, UnrewardedRelayersState,
+	source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages,
+	InboundLaneData, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer,
+	UnrewardedRelayersState,
 };
 use bp_runtime::messages::DispatchFeePayment;
-use frame_benchmarking::{account, benchmarks_instance};
+use frame_benchmarking::{account, benchmarks_instance_pallet};
 use frame_support::{traits::Get, weights::Weight};
 use frame_system::RawOrigin;
 use sp_std::{
@@ -38,23 +38,23 @@ use sp_std::{
 };
 
 /// Fee paid by submitter for single message delivery.
-pub const MESSAGE_FEE: u64 = 10_000_000_000;
+pub const MESSAGE_FEE: u64 = 100_000_000_000;
 
 const SEED: u32 = 0;
 
 /// Pallet we're benchmarking here.
-pub struct Pallet<T: Config<I>, I: crate::Instance>(crate::Pallet<T, I>);
+pub struct Pallet<T: Config<I>, I: 'static>(crate::Pallet<T, I>);
 
 /// Proof size requirements.
 pub enum ProofSize {
 	/// The proof is expected to be minimal. If value size may be changed, then it is expected to
 	/// have given size.
 	Minimal(u32),
-	/// The proof is expected to have at least given size and grow by increasing number of trie nodes
-	/// included in the proof.
+	/// The proof is expected to have at least given size and grow by increasing number of trie
+	/// nodes included in the proof.
 	HasExtraNodes(u32),
-	/// The proof is expected to have at least given size and grow by increasing value that is stored
-	/// in the trie.
+	/// The proof is expected to have at least given size and grow by increasing value that is
+	/// stored in the trie.
 	HasLargeLeaf(u32),
 }
 
@@ -91,7 +91,7 @@ pub struct MessageDeliveryProofParams<ThisChainAccountId> {
 }
 
 /// Trait that must be implemented by runtime.
-pub trait Config<I: Instance>: crate::Config<I> {
+pub trait Config<I: 'static>: crate::Config<I> {
 	/// Lane id to use in benchmarks.
 	fn bench_lane_id() -> LaneId {
 		Default::default()
@@ -123,7 +123,7 @@ pub trait Config<I: Instance>: crate::Config<I> {
 	fn is_message_dispatched(nonce: MessageNonce) -> bool;
 }
 
-benchmarks_instance! {
+benchmarks_instance_pallet! {
 	//
 	// Benchmarks that are used directly by the runtime.
 	//
@@ -237,7 +237,9 @@ benchmarks_instance! {
 	// Benchmark `increase_message_fee` with following conditions:
 	// * message has maximal message;
 	// * submitter account is killed because its balance is less than ED after payment.
-	increase_message_fee {
+	//
+	// Result of this benchmark is directly used by the weight formula of the call.
+	maximal_increase_message_fee {
 		let sender = account("sender", 42, SEED);
 		T::endow_account(&sender);
 
@@ -251,6 +253,25 @@ benchmarks_instance! {
 		assert_eq!(T::account_balance(&sender), 0.into());
 	}
 
+	// Benchmark `increase_message_fee` with following conditions:
+	// * message size varies from minimal to maximal;
+	// * submitter account is killed because its balance is less than ED after payment.
+	increase_message_fee {
+		let i in 0..T::maximal_message_size().try_into().unwrap_or_default();
+
+		let sender = account("sender", 42, SEED);
+		T::endow_account(&sender);
+
+		let additional_fee = T::account_balance(&sender);
+		let lane_id = T::bench_lane_id();
+		let nonce = 1;
+
+		send_regular_message_with_payload::<T, I>(vec![42u8; i as _]);
+	}: increase_message_fee(RawOrigin::Signed(sender.clone()), lane_id, nonce, additional_fee)
+	verify {
+		assert_eq!(T::account_balance(&sender), 0.into());
+	}
+
 	// Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions:
 	// * proof does not include outbound lane state proof;
 	// * inbound lane already has state, so it needs to be read and decoded;
@@ -463,7 +484,7 @@ benchmarks_instance! {
 	//
 	// This is base benchmark for all other confirmations delivery benchmarks.
 	receive_delivery_proof_for_single_message {
-		let relayers_fund_id = crate::Pallet::<T, I>::relayer_fund_account_id();
+		let relayers_fund_id = crate::relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 		let relayer_id: T::AccountId = account("relayer", 0, SEED);
 		let relayer_balance = T::account_balance(&relayer_id);
 		T::endow_account(&relayers_fund_id);
@@ -503,7 +524,7 @@ benchmarks_instance! {
 	// as `weight(receive_delivery_proof_for_two_messages_by_single_relayer)
 	//   - weight(receive_delivery_proof_for_single_message)`.
 	receive_delivery_proof_for_two_messages_by_single_relayer {
-		let relayers_fund_id = crate::Pallet::<T, I>::relayer_fund_account_id();
+		let relayers_fund_id = crate::relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 		let relayer_id: T::AccountId = account("relayer", 0, SEED);
 		let relayer_balance = T::account_balance(&relayer_id);
 		T::endow_account(&relayers_fund_id);
@@ -543,7 +564,7 @@ benchmarks_instance! {
 	// as `weight(receive_delivery_proof_for_two_messages_by_two_relayers)
 	//   - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`.
 	receive_delivery_proof_for_two_messages_by_two_relayers {
-		let relayers_fund_id = crate::Pallet::<T, I>::relayer_fund_account_id();
+		let relayers_fund_id = crate::relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 		let relayer1_id: T::AccountId = account("relayer1", 1, SEED);
 		let relayer1_balance = T::account_balance(&relayer1_id);
 		let relayer2_id: T::AccountId = account("relayer2", 2, SEED);
@@ -790,7 +811,7 @@ benchmarks_instance! {
 			.try_into()
 			.expect("Value of MaxUnrewardedRelayerEntriesAtInboundLane is too large");
 
-		let relayers_fund_id = crate::Pallet::<T, I>::relayer_fund_account_id();
+		let relayers_fund_id = crate::relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 		let relayer_id: T::AccountId = account("relayer", 0, SEED);
 		let relayer_balance = T::account_balance(&relayer_id);
 		T::endow_account(&relayers_fund_id);
@@ -833,7 +854,7 @@ benchmarks_instance! {
 			.try_into()
 			.expect("Value of MaxUnconfirmedMessagesAtInboundLane is too large ");
 
-		let relayers_fund_id = crate::Pallet::<T, I>::relayer_fund_account_id();
+		let relayers_fund_id = crate::relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 		let confirmation_relayer_id = account("relayer", 0, SEED);
 		let relayers: BTreeMap<T::AccountId, T::OutboundMessageFee> = (1..=i)
 			.map(|j| {
@@ -877,23 +898,17 @@ benchmarks_instance! {
 	}
 }
 
-fn send_regular_message<T: Config<I>, I: Instance>() {
+fn send_regular_message<T: Config<I>, I: 'static>() {
 	let mut outbound_lane = outbound_lane::<T, I>(T::bench_lane_id());
-	outbound_lane.send_message(MessageData {
-		payload: vec![],
-		fee: MESSAGE_FEE.into(),
-	});
+	outbound_lane.send_message(MessageData { payload: vec![], fee: MESSAGE_FEE.into() });
 }
 
-fn send_regular_message_with_payload<T: Config<I>, I: Instance>(payload: Vec<u8>) {
+fn send_regular_message_with_payload<T: Config<I>, I: 'static>(payload: Vec<u8>) {
 	let mut outbound_lane = outbound_lane::<T, I>(T::bench_lane_id());
-	outbound_lane.send_message(MessageData {
-		payload,
-		fee: MESSAGE_FEE.into(),
-	});
+	outbound_lane.send_message(MessageData { payload, fee: MESSAGE_FEE.into() });
 }
 
-fn confirm_message_delivery<T: Config<I>, I: Instance>(nonce: MessageNonce) {
+fn confirm_message_delivery<T: Config<I>, I: 'static>(nonce: MessageNonce) {
 	let mut outbound_lane = outbound_lane::<T, I>(T::bench_lane_id());
 	let latest_received_nonce = outbound_lane.data().latest_received_nonce;
 	let mut relayers = VecDeque::with_capacity((nonce - latest_received_nonce) as usize);
@@ -904,12 +919,12 @@ fn confirm_message_delivery<T: Config<I>, I: Instance>(nonce: MessageNonce) {
 		});
 	}
 	assert!(matches!(
-		outbound_lane.confirm_delivery(nonce, &relayers),
+		outbound_lane.confirm_delivery(nonce - latest_received_nonce, nonce, &relayers),
 		ReceivalConfirmationResult::ConfirmedMessages(_),
 	));
 }
 
-fn receive_messages<T: Config<I>, I: Instance>(nonce: MessageNonce) {
+fn receive_messages<T: Config<I>, I: 'static>(nonce: MessageNonce) {
 	let mut inbound_lane_storage = inbound_lane_storage::<T, I>(T::bench_lane_id());
 	inbound_lane_storage.set_data(InboundLaneData {
 		relayers: vec![UnrewardedRelayer {
@@ -922,7 +937,10 @@ fn receive_messages<T: Config<I>, I: Instance>(nonce: MessageNonce) {
 	});
 }
 
-fn ensure_relayer_rewarded<T: Config<I>, I: Instance>(relayer_id: &T::AccountId, old_balance: &T::OutboundMessageFee) {
+fn ensure_relayer_rewarded<T: Config<I>, I: 'static>(
+	relayer_id: &T::AccountId,
+	old_balance: &T::OutboundMessageFee,
+) {
 	let new_balance = T::account_balance(relayer_id);
 	assert!(
 		new_balance > *old_balance,
diff --git a/polkadot/bridges/modules/messages/src/inbound_lane.rs b/polkadot/bridges/modules/messages/src/inbound_lane.rs
index 83d17dc3c06c23ae8fd16824825f14bd04397e36..00875bb878a823beda55a136ab96910ea8eceaaf 100644
--- a/polkadot/bridges/modules/messages/src/inbound_lane.rs
+++ b/polkadot/bridges/modules/messages/src/inbound_lane.rs
@@ -18,7 +18,8 @@
 
 use bp_messages::{
 	target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch},
-	DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, UnrewardedRelayer,
+	DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData,
+	UnrewardedRelayer,
 };
 use bp_runtime::messages::MessageDispatchResult;
 use frame_support::RuntimeDebug;
@@ -53,7 +54,7 @@ pub enum ReceivalResult {
 	Dispatched(MessageDispatchResult),
 	/// Message has invalid nonce and lane has rejected to accept this message.
 	InvalidNonce,
-	/// There are too many unrewarded relayer entires at the lane.
+	/// There are too many unrewarded relayer entries at the lane.
 	TooManyUnrewardedRelayers,
 	/// There are too many unconfirmed messages at the lane.
 	TooManyUnconfirmedMessages,
@@ -71,16 +72,19 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 	}
 
 	/// Receive state of the corresponding outbound lane.
-	pub fn receive_state_update(&mut self, outbound_lane_data: OutboundLaneData) -> Option<MessageNonce> {
+	pub fn receive_state_update(
+		&mut self,
+		outbound_lane_data: OutboundLaneData,
+	) -> Option<MessageNonce> {
 		let mut data = self.storage.data();
 		let last_delivered_nonce = data.last_delivered_nonce();
 
 		if outbound_lane_data.latest_received_nonce > last_delivered_nonce {
 			// this is something that should never happen if proofs are correct
-			return None;
+			return None
 		}
 		if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce {
-			return None;
+			return None
 		}
 
 		let new_confirmed_nonce = outbound_lane_data.latest_received_nonce;
@@ -95,7 +99,8 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 			data.relayers.pop_front();
 		}
 		// Secondly, update the next record with lower nonce equal to new confirmed nonce if needed.
-		// Note: There will be max. 1 record to update as we don't allow messages from relayers to overlap.
+		// Note: There will be max. 1 record to update as we don't allow messages from relayers to
+		// overlap.
 		match data.relayers.front_mut() {
 			Some(entry) if entry.messages.begin < new_confirmed_nonce => {
 				entry.messages.dispatch_results = entry
@@ -103,8 +108,8 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 					.dispatch_results
 					.split_off((new_confirmed_nonce + 1 - entry.messages.begin) as _);
 				entry.messages.begin = new_confirmed_nonce + 1;
-			}
-			_ => {}
+			},
+			_ => {},
 		}
 
 		self.storage.set_data(data);
@@ -122,30 +127,25 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 		let mut data = self.storage.data();
 		let is_correct_message = nonce == data.last_delivered_nonce() + 1;
 		if !is_correct_message {
-			return ReceivalResult::InvalidNonce;
+			return ReceivalResult::InvalidNonce
 		}
 
 		// if there are more unrewarded relayer entries than we may accept, reject this message
 		if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() {
-			return ReceivalResult::TooManyUnrewardedRelayers;
+			return ReceivalResult::TooManyUnrewardedRelayers
 		}
 
 		// if there are more unconfirmed messages than we may accept, reject this message
 		let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce);
 		if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() {
-			return ReceivalResult::TooManyUnconfirmedMessages;
+			return ReceivalResult::TooManyUnconfirmedMessages
 		}
 
-		// dispatch message before updating anything in the storage. If dispatch would panic,
-		// (which should not happen in the runtime) then we simply won't consider message as
-		// delivered (no changes to the inbound lane storage have been made).
+		// then, dispatch message
 		let dispatch_result = P::dispatch(
 			relayer_at_this_chain,
 			DispatchMessage {
-				key: MessageKey {
-					lane_id: self.storage.id(),
-					nonce,
-				},
+				key: MessageKey { lane_id: self.storage.id(), nonce },
 				data: message_data,
 			},
 		);
@@ -155,7 +155,7 @@ impl<S: InboundLaneStorage> InboundLane<S> {
 			Some(entry) if entry.relayer == *relayer_at_bridged_chain => {
 				entry.messages.note_dispatched_message(dispatch_result.dispatch_result);
 				false
-			}
+			},
 			_ => true,
 		};
 		if push_new {
@@ -176,14 +176,15 @@ mod tests {
 	use crate::{
 		inbound_lane,
 		mock::{
-			dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, TestRuntime,
-			REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C,
+			dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch,
+			TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B,
+			TEST_RELAYER_C,
 		},
-		DefaultInstance, RuntimeInboundLaneStorage,
+		RuntimeInboundLaneStorage,
 	};
 
 	fn receive_regular_message(
-		lane: &mut InboundLane<RuntimeInboundLaneStorage<TestRuntime, DefaultInstance>>,
+		lane: &mut InboundLane<RuntimeInboundLaneStorage<TestRuntime, ()>>,
 		nonce: MessageNonce,
 	) {
 		assert_eq!(
@@ -286,16 +287,10 @@ mod tests {
 			let mut seed_storage_data = lane.storage.data();
 			// Prepare data
 			seed_storage_data.last_confirmed_nonce = 0;
-			seed_storage_data
-				.relayers
-				.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A));
+			seed_storage_data.relayers.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A));
 			// Simulate messages batch (2, 3, 4) from relayer #2
-			seed_storage_data
-				.relayers
-				.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B));
-			seed_storage_data
-				.relayers
-				.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C));
+			seed_storage_data.relayers.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B));
+			seed_storage_data.relayers.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C));
 			lane.storage.set_data(seed_storage_data);
 			// Check
 			assert_eq!(
@@ -337,7 +332,8 @@ mod tests {
 	fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() {
 		run_test(|| {
 			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
-			let max_nonce = <TestRuntime as crate::Config>::MaxUnrewardedRelayerEntriesAtInboundLane::get();
+			let max_nonce =
+				<TestRuntime as crate::Config>::MaxUnrewardedRelayerEntriesAtInboundLane::get();
 			for current_nonce in 1..max_nonce + 1 {
 				assert_eq!(
 					lane.receive_message::<TestMessageDispatch, _>(
@@ -376,7 +372,8 @@ mod tests {
 	fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() {
 		run_test(|| {
 			let mut lane = inbound_lane::<TestRuntime, _>(TEST_LANE_ID);
-			let max_nonce = <TestRuntime as crate::Config>::MaxUnconfirmedMessagesAtInboundLane::get();
+			let max_nonce =
+				<TestRuntime as crate::Config>::MaxUnconfirmedMessagesAtInboundLane::get();
 			for current_nonce in 1..=max_nonce {
 				assert_eq!(
 					lane.receive_message::<TestMessageDispatch, _>(
diff --git a/polkadot/bridges/modules/messages/src/instant_payments.rs b/polkadot/bridges/modules/messages/src/instant_payments.rs
index 524a3765d6ad4924c56c6d6e55b117f03915e774..c145687af994f0701c4ebd6ba0034babfa998e56 100644
--- a/polkadot/bridges/modules/messages/src/instant_payments.rs
+++ b/polkadot/bridges/modules/messages/src/instant_payments.rs
@@ -19,58 +19,57 @@
 //! The payment is first transferred to a special `relayers-fund` account and only transferred
 //! to the actual relayer in case confirmation is received.
 
+use crate::OutboundMessages;
+
 use bp_messages::{
 	source_chain::{MessageDeliveryAndDispatchPayment, RelayersRewards, Sender},
-	MessageNonce,
+	LaneId, MessageKey, MessageNonce, UnrewardedRelayer,
 };
 use codec::Encode;
 use frame_support::traits::{Currency as CurrencyT, ExistenceRequirement, Get};
-use num_traits::Zero;
+use num_traits::{SaturatingAdd, Zero};
 use sp_runtime::traits::Saturating;
-use sp_std::fmt::Debug;
+use sp_std::{collections::vec_deque::VecDeque, fmt::Debug, ops::RangeInclusive};
 
 /// Instant message payments made in given currency.
 ///
-/// The balance is initally reserved in a special `relayers-fund` account, and transferred
+/// The balance is initially reserved in a special `relayers-fund` account, and transferred
 /// to the relayer when message delivery is confirmed.
 ///
-/// Additionaly, confirmation transaction submitter (`confirmation_relayer`) is reimbursed
+/// Additionally, confirmation transaction submitter (`confirmation_relayer`) is reimbursed
 /// with the confirmation rewards (part of message fee, reserved to pay for delivery confirmation).
 ///
 /// NOTE The `relayers-fund` account must always exist i.e. be over Existential Deposit (ED; the
-/// pallet enforces that) to make sure that even if the message cost is below ED it is still payed
+/// pallet enforces that) to make sure that even if the message cost is below ED it is still paid
 /// to the relayer account.
 /// NOTE It's within relayer's interest to keep their balance above ED as well, to make sure they
 /// can receive the payment.
-pub struct InstantCurrencyPayments<T, Currency, GetConfirmationFee, RootAccount> {
-	_phantom: sp_std::marker::PhantomData<(T, Currency, GetConfirmationFee, RootAccount)>,
+pub struct InstantCurrencyPayments<T, I, Currency, GetConfirmationFee, RootAccount> {
+	_phantom: sp_std::marker::PhantomData<(T, I, Currency, GetConfirmationFee, RootAccount)>,
 }
 
-impl<T, Currency, GetConfirmationFee, RootAccount> MessageDeliveryAndDispatchPayment<T::AccountId, Currency::Balance>
-	for InstantCurrencyPayments<T, Currency, GetConfirmationFee, RootAccount>
+impl<T, I, Currency, GetConfirmationFee, RootAccount>
+	MessageDeliveryAndDispatchPayment<T::AccountId, Currency::Balance>
+	for InstantCurrencyPayments<T, I, Currency, GetConfirmationFee, RootAccount>
 where
-	T: frame_system::Config,
-	Currency: CurrencyT<T::AccountId>,
+	T: frame_system::Config + crate::Config<I>,
+	I: 'static,
+	Currency: CurrencyT<T::AccountId, Balance = T::OutboundMessageFee>,
 	Currency::Balance: From<MessageNonce>,
 	GetConfirmationFee: Get<Currency::Balance>,
 	RootAccount: Get<Option<T::AccountId>>,
 {
 	type Error = &'static str;
 
-	fn initialize(relayer_fund_account: &T::AccountId) -> usize {
-		assert!(
-			frame_system::Pallet::<T>::account_exists(relayer_fund_account),
-			"The relayer fund account ({:?}) must exist for the message lanes pallet to work correctly.",
-			relayer_fund_account,
-		);
-		1
-	}
-
 	fn pay_delivery_and_dispatch_fee(
 		submitter: &Sender<T::AccountId>,
 		fee: &Currency::Balance,
 		relayer_fund_account: &T::AccountId,
 	) -> Result<(), Self::Error> {
+		if !frame_system::Pallet::<T>::account_exists(relayer_fund_account) {
+			return Err("The relayer fund account must exist for the message lanes pallet to work correctly.");
+		}
+
 		let root_account = RootAccount::get();
 		let account = match submitter {
 			Sender::Signed(submitter) => submitter,
@@ -90,19 +89,55 @@ where
 	}
 
 	fn pay_relayers_rewards(
+		lane_id: LaneId,
+		messages_relayers: VecDeque<UnrewardedRelayer<T::AccountId>>,
 		confirmation_relayer: &T::AccountId,
-		relayers_rewards: RelayersRewards<T::AccountId, Currency::Balance>,
+		received_range: &RangeInclusive<MessageNonce>,
 		relayer_fund_account: &T::AccountId,
 	) {
-		pay_relayers_rewards::<Currency, _>(
-			confirmation_relayer,
-			relayers_rewards,
-			relayer_fund_account,
-			GetConfirmationFee::get(),
-		);
+		let relayers_rewards =
+			cal_relayers_rewards::<T, I>(lane_id, messages_relayers, received_range);
+		if !relayers_rewards.is_empty() {
+			pay_relayers_rewards::<Currency, _>(
+				confirmation_relayer,
+				relayers_rewards,
+				relayer_fund_account,
+				GetConfirmationFee::get(),
+			);
+		}
 	}
 }
 
+/// Calculate the relayers' rewards.
+pub(crate) fn cal_relayers_rewards<T, I>(
+	lane_id: LaneId,
+	messages_relayers: VecDeque<UnrewardedRelayer<T::AccountId>>,
+	received_range: &RangeInclusive<MessageNonce>,
+) -> RelayersRewards<T::AccountId, T::OutboundMessageFee>
+where
+	T: frame_system::Config + crate::Config<I>,
+	I: 'static,
+{
+	// remember to reward relayers that have delivered messages
+	// this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain
+	let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new();
+	for entry in messages_relayers {
+		let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start());
+		let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end());
+
+		// loop won't proceed if current entry is ahead of received range (begin > end).
+		// this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain
+		let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default();
+		for nonce in nonce_begin..nonce_end + 1 {
+			let message_data = OutboundMessages::<T, I>::get(MessageKey { lane_id, nonce })
+				.expect("message was just confirmed; we never prune unconfirmed messages; qed");
+			relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee);
+			relayer_reward.messages += 1;
+		}
+	}
+	relayers_rewards
+}
+
 /// Pay rewards to given relayers, optionally rewarding confirmation relayer.
 fn pay_relayers_rewards<Currency, AccountId>(
 	confirmation_relayer: &AccountId,
@@ -123,26 +158,31 @@ fn pay_relayers_rewards<Currency, AccountId>(
 			// If delivery confirmation is submitted by other relayer, let's deduct confirmation fee
 			// from relayer reward.
 			//
-			// If confirmation fee has been increased (or if it was the only component of message fee),
-			// then messages relayer may receive zero reward.
+			// If confirmation fee has been increased (or if it was the only component of message
+			// fee), then messages relayer may receive zero reward.
 			let mut confirmation_reward = confirmation_fee.saturating_mul(reward.messages.into());
 			if confirmation_reward > relayer_reward {
 				confirmation_reward = relayer_reward;
 			}
 			relayer_reward = relayer_reward.saturating_sub(confirmation_reward);
-			confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(confirmation_reward);
+			confirmation_relayer_reward =
+				confirmation_relayer_reward.saturating_add(confirmation_reward);
 		} else {
 			// If delivery confirmation is submitted by this relayer, let's add confirmation fee
 			// from other relayers to this relayer reward.
 			confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(reward.reward);
-			continue;
+			continue
 		}
 
 		pay_relayer_reward::<Currency, _>(relayer_fund_account, &relayer, relayer_reward);
 	}
 
 	// finally - pay reward to confirmation relayer
-	pay_relayer_reward::<Currency, _>(relayer_fund_account, confirmation_relayer, confirmation_relayer_reward);
+	pay_relayer_reward::<Currency, _>(
+		relayer_fund_account,
+		confirmation_relayer,
+		confirmation_relayer_reward,
+	);
 }
 
 /// Transfer funds from relayers fund account to given relayer.
@@ -155,7 +195,7 @@ fn pay_relayer_reward<Currency, AccountId>(
 	Currency: CurrencyT<AccountId>,
 {
 	if reward.is_zero() {
-		return;
+		return
 	}
 
 	let pay_result = Currency::transfer(
@@ -198,20 +238,8 @@ mod tests {
 
 	fn relayers_rewards() -> RelayersRewards<TestAccountId, TestBalance> {
 		vec![
-			(
-				RELAYER_1,
-				RelayerRewards {
-					reward: 100,
-					messages: 2,
-				},
-			),
-			(
-				RELAYER_2,
-				RelayerRewards {
-					reward: 100,
-					messages: 3,
-				},
-			),
+			(RELAYER_1, RelayerRewards { reward: 100, messages: 2 }),
+			(RELAYER_2, RelayerRewards { reward: 100, messages: 3 }),
 		]
 		.into_iter()
 		.collect()
@@ -220,7 +248,12 @@ mod tests {
 	#[test]
 	fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() {
 		run_test(|| {
-			pay_relayers_rewards::<Balances, _>(&RELAYER_2, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10);
+			pay_relayers_rewards::<Balances, _>(
+				&RELAYER_2,
+				relayers_rewards(),
+				&RELAYERS_FUND_ACCOUNT,
+				10,
+			);
 
 			assert_eq!(Balances::free_balance(&RELAYER_1), 80);
 			assert_eq!(Balances::free_balance(&RELAYER_2), 120);
@@ -230,7 +263,12 @@ mod tests {
 	#[test]
 	fn confirmation_relayer_is_rewarded_if_it_has_not_delivered_any_delivered_messages() {
 		run_test(|| {
-			pay_relayers_rewards::<Balances, _>(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 10);
+			pay_relayers_rewards::<Balances, _>(
+				&RELAYER_3,
+				relayers_rewards(),
+				&RELAYERS_FUND_ACCOUNT,
+				10,
+			);
 
 			assert_eq!(Balances::free_balance(&RELAYER_1), 80);
 			assert_eq!(Balances::free_balance(&RELAYER_2), 70);
@@ -241,7 +279,12 @@ mod tests {
 	#[test]
 	fn only_confirmation_relayer_is_rewarded_if_confirmation_fee_has_significantly_increased() {
 		run_test(|| {
-			pay_relayers_rewards::<Balances, _>(&RELAYER_3, relayers_rewards(), &RELAYERS_FUND_ACCOUNT, 1000);
+			pay_relayers_rewards::<Balances, _>(
+				&RELAYER_3,
+				relayers_rewards(),
+				&RELAYERS_FUND_ACCOUNT,
+				1000,
+			);
 
 			assert_eq!(Balances::free_balance(&RELAYER_1), 0);
 			assert_eq!(Balances::free_balance(&RELAYER_2), 0);
diff --git a/polkadot/bridges/modules/messages/src/lib.rs b/polkadot/bridges/modules/messages/src/lib.rs
index 5594fdca6bfc0395d9637ecd914edd0944b4e1bc..80d946112c6f894052d6519021fe07be32a318d7 100644
--- a/polkadot/bridges/modules/messages/src/lib.rs
+++ b/polkadot/bridges/modules/messages/src/lib.rs
@@ -38,35 +38,39 @@
 #![allow(clippy::unused_unit)]
 
 pub use crate::weights_ext::{
-	ensure_able_to_receive_confirmation, ensure_able_to_receive_message, ensure_weights_are_correct, WeightInfoExt,
-	EXPECTED_DEFAULT_MESSAGE_LENGTH,
+	ensure_able_to_receive_confirmation, ensure_able_to_receive_message,
+	ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH,
 };
 
-use crate::inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult};
-use crate::outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult};
-use crate::weights::WeightInfo;
+use crate::{
+	inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult},
+	outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult},
+	weights::WeightInfo,
+};
 
 use bp_messages::{
 	source_chain::{
-		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, TargetHeaderChain,
+		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed,
+		OnMessageAccepted, SendMessageArtifacts, TargetHeaderChain,
+	},
+	target_chain::{
+		DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain,
 	},
-	target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain},
-	total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, MessageNonce,
-	OperatingMode, OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayersState,
+	total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey,
+	MessageNonce, OperatingMode, OutboundLaneData, Parameter as MessagesParameter,
+	UnrewardedRelayersState,
 };
-use bp_runtime::Size;
+use bp_runtime::{ChainId, Size};
 use codec::{Decode, Encode};
 use frame_support::{
-	decl_error, decl_event, decl_module, decl_storage,
-	dispatch::DispatchResultWithPostInfo,
-	ensure, fail,
+	fail,
 	traits::Get,
-	weights::{DispatchClass, Pays, PostDispatchInfo, Weight},
-	Parameter, StorageMap,
+	weights::{Pays, PostDispatchInfo},
 };
-use frame_system::{ensure_signed, RawOrigin};
+use frame_system::RawOrigin;
 use num_traits::{SaturatingAdd, Zero};
-use sp_runtime::{traits::BadOrigin, DispatchResult};
+use sp_core::H256;
+use sp_runtime::traits::{BadOrigin, Convert};
 use sp_std::{cell::RefCell, cmp::PartialOrd, marker::PhantomData, prelude::*};
 
 mod inbound_lane;
@@ -82,191 +86,138 @@ pub mod benchmarking;
 #[cfg(test)]
 mod mock;
 
-/// The module configuration trait
-pub trait Config<I = DefaultInstance>: frame_system::Config {
-	// General types
+pub use pallet::*;
 
-	/// They overarching event type.
-	type Event: From<Event<Self, I>> + Into<<Self as frame_system::Config>::Event>;
-	/// Benchmarks results from runtime we're plugged into.
-	type WeightInfo: WeightInfoExt;
-	/// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime
-	/// for integrating the pallet.
-	///
-	/// All pallet parameters may only be updated either by the root, or by the pallet owner.
-	type Parameter: MessagesParameter;
-
-	/// Maximal number of messages that may be pruned during maintenance. Maintenance occurs
-	/// whenever new message is sent. The reason is that if you want to use lane, you should
-	/// be ready to pay for its maintenance.
-	type MaxMessagesToPruneAtOnce: Get<MessageNonce>;
-	/// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the
-	/// relayer has delivered messages, but either confirmations haven't been delivered back to the
-	/// source chain, or we haven't received reward confirmations yet.
-	///
-	/// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep
-	/// in mind that the same relayer account may take several (non-consecutive) entries in this
-	/// set.
-	type MaxUnrewardedRelayerEntriesAtInboundLane: Get<MessageNonce>;
-	/// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the
-	/// message has been delivered, but either confirmations haven't been delivered back to the
-	/// source chain, or we haven't received reward confirmations for these messages yet.
-	///
-	/// This constant limits difference between last message from last entry of the
-	/// `InboundLaneData::relayers` and first message at the first entry.
-	///
-	/// There is no point of making this parameter lesser than MaxUnrewardedRelayerEntriesAtInboundLane,
-	/// because then maximal number of relayer entries will be limited by maximal number of messages.
-	///
-	/// This value also represents maximal number of messages in single delivery transaction. Transaction
-	/// that is declaring more messages than this value, will be rejected. Even if these messages are
-	/// from different lanes.
-	type MaxUnconfirmedMessagesAtInboundLane: Get<MessageNonce>;
-
-	/// Payload type of outbound messages. This payload is dispatched on the bridged chain.
-	type OutboundPayload: Parameter + Size;
-	/// Message fee type of outbound messages. This fee is paid on this chain.
-	type OutboundMessageFee: Default + From<u64> + PartialOrd + Parameter + SaturatingAdd + Zero;
-
-	/// Payload type of inbound messages. This payload is dispatched on this chain.
-	type InboundPayload: Decode;
-	/// Message fee type of inbound messages. This fee is paid on the bridged chain.
-	type InboundMessageFee: Decode;
-	/// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the bridged chain.
-	type InboundRelayer: Parameter;
-
-	/// A type which can be turned into an AccountId from a 256-bit hash.
-	///
-	/// Used when deriving the shared relayer fund account.
-	type AccountIdConverter: sp_runtime::traits::Convert<sp_core::hash::H256, Self::AccountId>;
-
-	// Types that are used by outbound_lane (on source chain).
-
-	/// Target header chain.
-	type TargetHeaderChain: TargetHeaderChain<Self::OutboundPayload, Self::AccountId>;
-	/// Message payload verifier.
-	type LaneMessageVerifier: LaneMessageVerifier<Self::AccountId, Self::OutboundPayload, Self::OutboundMessageFee>;
-	/// Message delivery payment.
-	type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment<Self::AccountId, Self::OutboundMessageFee>;
-	/// Handler for delivered messages.
-	type OnDeliveryConfirmed: OnDeliveryConfirmed;
-
-	// Types that are used by inbound_lane (on target chain).
-
-	/// Source header chain, as it is represented on target chain.
-	type SourceHeaderChain: SourceHeaderChain<Self::InboundMessageFee>;
-	/// Message dispatch.
-	type MessageDispatch: MessageDispatch<
-		Self::AccountId,
-		Self::InboundMessageFee,
-		DispatchPayload = Self::InboundPayload,
-	>;
-}
-
-/// Shortcut to messages proof type for Config.
-type MessagesProofOf<T, I> =
-	<<T as Config<I>>::SourceHeaderChain as SourceHeaderChain<<T as Config<I>>::InboundMessageFee>>::MessagesProof;
-/// Shortcut to messages delivery proof type for Config.
-type MessagesDeliveryProofOf<T, I> = <<T as Config<I>>::TargetHeaderChain as TargetHeaderChain<
-	<T as Config<I>>::OutboundPayload,
-	<T as frame_system::Config>::AccountId,
->>::MessagesDeliveryProof;
-
-decl_error! {
-	pub enum Error for Pallet<T: Config<I>, I: Instance> {
-		/// All pallet operations are halted.
-		Halted,
-		/// Message has been treated as invalid by chain verifier.
-		MessageRejectedByChainVerifier,
-		/// Message has been treated as invalid by lane verifier.
-		MessageRejectedByLaneVerifier,
-		/// Submitter has failed to pay fee for delivering and dispatching messages.
-		FailedToWithdrawMessageFee,
-		/// The transaction brings too many messages.
-		TooManyMessagesInTheProof,
-		/// Invalid messages has been submitted.
-		InvalidMessagesProof,
-		/// Invalid messages dispatch weight has been declared by the relayer.
-		InvalidMessagesDispatchWeight,
-		/// Invalid messages delivery proof has been submitted.
-		InvalidMessagesDeliveryProof,
-		/// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane).
-		InvalidUnrewardedRelayers,
-		/// The relayer has declared invalid unrewarded relayers state in the `receive_messages_delivery_proof` call.
-		InvalidUnrewardedRelayersState,
-		/// The message someone is trying to work with (i.e. increase fee) is already-delivered.
-		MessageIsAlreadyDelivered,
-		/// The message someone is trying to work with (i.e. increase fee) is not yet sent.
-		MessageIsNotYetSent
-	}
-}
-
-decl_storage! {
-	trait Store for Pallet<T: Config<I>, I: Instance = DefaultInstance> as BridgeMessages {
-		/// Optional pallet owner.
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		// General types
+
+		/// The overarching event type.
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+		/// Benchmarks results from runtime we're plugged into.
+		type WeightInfo: WeightInfoExt;
+
+		/// Gets the chain id value from the instance.
+		#[pallet::constant]
+		type BridgedChainId: Get<ChainId>;
+		/// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime
+		/// for integrating the pallet.
 		///
-		/// Pallet owner has a right to halt all pallet operations and then resume it. If it is
-		/// `None`, then there are no direct ways to halt/resume pallet operations, but other
-		/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
-		/// flag directly or call the `halt_operations`).
-		pub PalletOwner get(fn module_owner): Option<T::AccountId>;
-		/// The current operating mode of the pallet.
+		/// All pallet parameters may only be updated either by the root, or by the pallet owner.
+		type Parameter: MessagesParameter;
+
+		/// Maximal number of messages that may be pruned during maintenance. Maintenance occurs
+		/// whenever a new message is sent. The reason is that if you want to use a lane, you should
+		/// be ready to pay for its maintenance.
+		type MaxMessagesToPruneAtOnce: Get<MessageNonce>;
+		/// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the
+		/// relayer has delivered messages, but either confirmations haven't been delivered back to
+		/// the source chain, or we haven't received reward confirmations yet.
 		///
-		/// Depending on the mode either all, some, or no transactions will be allowed.
-		pub PalletOperatingMode get(fn operating_mode) config(): OperatingMode;
-		/// Map of lane id => inbound lane data.
-		pub InboundLanes: map hasher(blake2_128_concat) LaneId => InboundLaneData<T::InboundRelayer>;
-		/// Map of lane id => outbound lane data.
-		pub OutboundLanes: map hasher(blake2_128_concat) LaneId => OutboundLaneData;
-		/// All queued outbound messages.
-		pub OutboundMessages: map hasher(blake2_128_concat) MessageKey => Option<MessageData<T::OutboundMessageFee>>;
-	}
-	add_extra_genesis {
-		config(phantom): sp_std::marker::PhantomData<I>;
-		config(owner): Option<T::AccountId>;
-		build(|config| {
-			if let Some(ref owner) = config.owner {
-				<PalletOwner<T, I>>::put(owner);
-			}
-		})
-	}
-}
-
-decl_event!(
-	pub enum Event<T, I = DefaultInstance>
-	where
-		AccountId = <T as frame_system::Config>::AccountId,
-		Parameter = <T as Config<I>>::Parameter,
-	{
-		/// Pallet parameter has been updated.
-		ParameterUpdated(Parameter),
-		/// Message has been accepted and is waiting to be delivered.
-		MessageAccepted(LaneId, MessageNonce),
-		/// Messages in the inclusive range have been delivered to the bridged chain.
-		MessagesDelivered(LaneId, DeliveredMessages),
-		/// Phantom member, never used.
-		Dummy(PhantomData<(AccountId, I)>),
-	}
-);
-
-decl_module! {
-	pub struct Module<T: Config<I>, I: Instance = DefaultInstance> for enum Call where origin: T::Origin {
-		/// Deposit one of this module's events by using the default implementation.
-		fn deposit_event() = default;
-
-		/// Ensure runtime invariants.
-		fn on_runtime_upgrade() -> Weight {
-			let reads = T::MessageDeliveryAndDispatchPayment::initialize(
-				&Self::relayer_fund_account_id()
-			);
-			T::DbWeight::get().reads(reads as u64)
-		}
-
+		/// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep
+		/// in mind that the same relayer account may take several (non-consecutive) entries in this
+		/// set.
+		type MaxUnrewardedRelayerEntriesAtInboundLane: Get<MessageNonce>;
+		/// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the
+		/// message has been delivered, but either confirmations haven't been delivered back to the
+		/// source chain, or we haven't received reward confirmations for these messages yet.
+		///
+		/// This constant limits difference between last message from last entry of the
+		/// `InboundLaneData::relayers` and first message at the first entry.
+		///
+		/// There is no point in making this parameter less than
+		/// MaxUnrewardedRelayerEntriesAtInboundLane, because then maximal number of relayer entries
+		/// will be limited by maximal number of messages.
+		///
+		/// This value also represents maximal number of messages in single delivery transaction.
+		/// A transaction that declares more messages than this value will be rejected. Even if
+		/// these messages are from different lanes.
+		type MaxUnconfirmedMessagesAtInboundLane: Get<MessageNonce>;
+
+		/// Payload type of outbound messages. This payload is dispatched on the bridged chain.
+		type OutboundPayload: Parameter + Size;
+		/// Message fee type of outbound messages. This fee is paid on this chain.
+		type OutboundMessageFee: Default
+			+ From<u64>
+			+ PartialOrd
+			+ Parameter
+			+ SaturatingAdd
+			+ Zero
+			+ Copy;
+
+		/// Payload type of inbound messages. This payload is dispatched on this chain.
+		type InboundPayload: Decode;
+		/// Message fee type of inbound messages. This fee is paid on the bridged chain.
+		type InboundMessageFee: Decode;
+		/// Identifier of the relayer that delivers messages to this chain. Relayer reward is paid on the
+		/// bridged chain.
+		type InboundRelayer: Parameter;
+
+		/// A type which can be turned into an AccountId from a 256-bit hash.
+		///
+		/// Used when deriving the shared relayer fund account.
+		type AccountIdConverter: sp_runtime::traits::Convert<sp_core::hash::H256, Self::AccountId>;
+
+		// Types that are used by outbound_lane (on source chain).
+
+		/// Target header chain.
+		type TargetHeaderChain: TargetHeaderChain<Self::OutboundPayload, Self::AccountId>;
+		/// Message payload verifier.
+		type LaneMessageVerifier: LaneMessageVerifier<
+			Self::AccountId,
+			Self::OutboundPayload,
+			Self::OutboundMessageFee,
+		>;
+		/// Message delivery payment.
+		type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment<
+			Self::AccountId,
+			Self::OutboundMessageFee,
+		>;
+		/// Handler for accepted messages.
+		type OnMessageAccepted: OnMessageAccepted;
+		/// Handler for delivered messages.
+		type OnDeliveryConfirmed: OnDeliveryConfirmed;
+
+		// Types that are used by inbound_lane (on target chain).
+
+		/// Source header chain, as it is represented on target chain.
+		type SourceHeaderChain: SourceHeaderChain<Self::InboundMessageFee>;
+		/// Message dispatch.
+		type MessageDispatch: MessageDispatch<
+			Self::AccountId,
+			Self::InboundMessageFee,
+			DispatchPayload = Self::InboundPayload,
+		>;
+	}
+
+	/// Shortcut to messages proof type for Config.
+	type MessagesProofOf<T, I> = <<T as Config<I>>::SourceHeaderChain as SourceHeaderChain<
+		<T as Config<I>>::InboundMessageFee,
+	>>::MessagesProof;
+	/// Shortcut to messages delivery proof type for Config.
+	type MessagesDeliveryProofOf<T, I> =
+		<<T as Config<I>>::TargetHeaderChain as TargetHeaderChain<
+			<T as Config<I>>::OutboundPayload,
+			<T as frame_system::Config>::AccountId,
+		>>::MessagesDeliveryProof;
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
 		/// Change `PalletOwner`.
 		///
 		/// May only be called either by root, or by `PalletOwner`.
-		#[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)]
-		pub fn set_owner(origin, new_owner: Option<T::AccountId>) {
+		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
+		pub fn set_owner(origin: OriginFor<T>, new_owner: Option<T::AccountId>) -> DispatchResult {
 			ensure_owner_or_root::<T, I>(origin)?;
 			match new_owner {
 				Some(new_owner) => {
@@ -278,149 +229,104 @@ decl_module! {
 					log::info!(target: "runtime::bridge-messages", "Removed Owner of pallet.");
 				},
 			}
+			Ok(())
 		}
 
 		/// Halt or resume all/some pallet operations.
 		///
 		/// May only be called either by root, or by `PalletOwner`.
-		#[weight = (T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational)]
-		pub fn set_operating_mode(origin, operating_mode: OperatingMode) {
+		#[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))]
+		pub fn set_operating_mode(
+			origin: OriginFor<T>,
+			operating_mode: OperatingMode,
+		) -> DispatchResult {
 			ensure_owner_or_root::<T, I>(origin)?;
-			<PalletOperatingMode<I>>::put(operating_mode);
+			PalletOperatingMode::<T, I>::put(operating_mode);
 			log::info!(
 				target: "runtime::bridge-messages",
 				"Setting messages pallet operating mode to {:?}.",
 				operating_mode,
 			);
+			Ok(())
 		}
 
 		/// Update pallet parameter.
 		///
 		/// May only be called either by root, or by `PalletOwner`.
 		///
-		/// The weight is: single read for permissions check + 2 writes for parameter value and event.
-		#[weight = (T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational)]
-		pub fn update_pallet_parameter(origin, parameter: T::Parameter) {
+		/// The weight is: single read for permissions check + 2 writes for parameter value and
+		/// event.
+		#[pallet::weight((T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational))]
+		pub fn update_pallet_parameter(
+			origin: OriginFor<T>,
+			parameter: T::Parameter,
+		) -> DispatchResult {
 			ensure_owner_or_root::<T, I>(origin)?;
 			parameter.save();
-			Self::deposit_event(RawEvent::ParameterUpdated(parameter));
+			Self::deposit_event(Event::ParameterUpdated(parameter));
+			Ok(())
 		}
 
 		/// Send message over lane.
-		#[weight = T::WeightInfo::send_message_weight(payload)]
+		#[pallet::weight(T::WeightInfo::send_message_weight(payload, T::DbWeight::get()))]
 		pub fn send_message(
-			origin,
+			origin: OriginFor<T>,
 			lane_id: LaneId,
 			payload: T::OutboundPayload,
 			delivery_and_dispatch_fee: T::OutboundMessageFee,
-		) -> DispatchResult {
-			ensure_normal_operating_mode::<T, I>()?;
-			let submitter = origin.into().map_err(|_| BadOrigin)?;
-
-			// let's first check if message can be delivered to target chain
-			T::TargetHeaderChain::verify_message(&payload)
-				.map_err(|err| {
-					log::trace!(
-						target: "runtime::bridge-messages",
-						"Message to lane {:?} is rejected by target chain: {:?}",
-						lane_id,
-						err,
-					);
-
-					Error::<T, I>::MessageRejectedByChainVerifier
-				})?;
-
-			// now let's enforce any additional lane rules
-			let mut lane = outbound_lane::<T, I>(lane_id);
-			T::LaneMessageVerifier::verify_message(
-				&submitter,
-				&delivery_and_dispatch_fee,
-				&lane_id,
-				&lane.data(),
-				&payload,
-			).map_err(|err| {
-				log::trace!(
-					target: "runtime::bridge-messages",
-					"Message to lane {:?} is rejected by lane verifier: {:?}",
-					lane_id,
-					err,
-				);
-
-				Error::<T, I>::MessageRejectedByLaneVerifier
-			})?;
-
-			// let's withdraw delivery and dispatch fee from submitter
-			T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee(
-				&submitter,
-				&delivery_and_dispatch_fee,
-				&Self::relayer_fund_account_id(),
-			).map_err(|err| {
-				log::trace!(
-					target: "runtime::bridge-messages",
-					"Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}",
-					lane_id,
-					submitter,
-					delivery_and_dispatch_fee,
-					err,
-				);
-
-				Error::<T, I>::FailedToWithdrawMessageFee
-			})?;
-
-			// finally, save message in outbound storage and emit event
-			let encoded_payload = payload.encode();
-			let encoded_payload_len = encoded_payload.len();
-			let nonce = lane.send_message(MessageData {
-				payload: encoded_payload,
-				fee: delivery_and_dispatch_fee,
-			});
-			lane.prune_messages(T::MaxMessagesToPruneAtOnce::get());
-
-			log::trace!(
-				target: "runtime::bridge-messages",
-				"Accepted message {} to lane {:?}. Message size: {:?}",
-				nonce,
+		) -> DispatchResultWithPostInfo {
+			crate::send_message::<T, I>(
+				origin.into().map_err(|_| BadOrigin)?,
 				lane_id,
-				encoded_payload_len,
-			);
-
-			Self::deposit_event(RawEvent::MessageAccepted(lane_id, nonce));
-
-			Ok(())
+				payload,
+				delivery_and_dispatch_fee,
+			)
+			.map(|sent_message| PostDispatchInfo {
+				actual_weight: Some(sent_message.weight),
+				pays_fee: Pays::Yes,
+			})
 		}
 
 		/// Pay additional fee for the message.
-		#[weight = T::WeightInfo::increase_message_fee()]
+		#[pallet::weight(T::WeightInfo::maximal_increase_message_fee())]
 		pub fn increase_message_fee(
-			origin,
+			origin: OriginFor<T>,
 			lane_id: LaneId,
 			nonce: MessageNonce,
 			additional_fee: T::OutboundMessageFee,
-		) -> DispatchResult {
+		) -> DispatchResultWithPostInfo {
 			ensure_not_halted::<T, I>()?;
 			// if someone tries to pay for already-delivered message, we're rejecting this intention
 			// (otherwise this additional fee will be locked forever in relayers fund)
 			//
-			// if someone tries to pay for not-yet-sent message, we're rejeting this intention, or
+			// if someone tries to pay for not-yet-sent message, we're rejecting this intention, or
 			// we're risking to have mess in the storage
 			let lane = outbound_lane::<T, I>(lane_id);
-			ensure!(nonce > lane.data().latest_received_nonce, Error::<T, I>::MessageIsAlreadyDelivered);
-			ensure!(nonce <= lane.data().latest_generated_nonce, Error::<T, I>::MessageIsNotYetSent);
+			ensure!(
+				nonce > lane.data().latest_received_nonce,
+				Error::<T, I>::MessageIsAlreadyDelivered
+			);
+			ensure!(
+				nonce <= lane.data().latest_generated_nonce,
+				Error::<T, I>::MessageIsNotYetSent
+			);
 
 			// withdraw additional fee from submitter
 			let submitter = origin.into().map_err(|_| BadOrigin)?;
 			T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee(
 				&submitter,
 				&additional_fee,
-				&Self::relayer_fund_account_id(),
-			).map_err(|err| {
+				&relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>(),
+			)
+			.map_err(|err| {
 				log::trace!(
 					target: "runtime::bridge-messages",
-					"Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?}: {:?}",
+					"Submitter {:?} can't pay additional fee {:?} for the message {:?}/{:?} to {:?}: {:?}",
 					submitter,
 					additional_fee,
 					lane_id,
 					nonce,
+					relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>(),
 					err,
 				);
 
@@ -429,17 +335,24 @@ decl_module! {
 
 			// and finally update fee in the storage
 			let message_key = MessageKey { lane_id, nonce };
-			OutboundMessages::<T, I>::mutate(message_key, |message_data| {
+			let message_size = OutboundMessages::<T, I>::mutate(message_key, |message_data| {
 				// saturating_add is fine here - overflow here means that someone controls all
 				// chain funds, which shouldn't ever happen + `pay_delivery_and_dispatch_fee`
 				// above will fail before we reach here
-				let message_data = message_data
-					.as_mut()
-					.expect("the message is sent and not yet delivered; so it is in the storage; qed");
+				let message_data = message_data.as_mut().expect(
+					"the message is sent and not yet delivered; so it is in the storage; qed",
+				);
 				message_data.fee = message_data.fee.saturating_add(&additional_fee);
+				message_data.payload.len()
 			});
 
-			Ok(())
+			// compute actual dispatch weight that depends on the stored message size
+			let actual_weight = sp_std::cmp::min(
+				T::WeightInfo::maximal_increase_message_fee(),
+				T::WeightInfo::increase_message_fee(message_size as _),
+			);
+
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
 
 		/// Receive messages proof from bridged chain.
@@ -447,9 +360,9 @@ decl_module! {
 		/// The weight of the call assumes that the transaction always brings outbound lane
 		/// state update. Because of that, the submitter (relayer) has no benefit of not including
 		/// this data in the transaction, so reward confirmations lags should be minimal.
-		#[weight = T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight)]
+		#[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))]
 		pub fn receive_messages_proof(
-			origin,
+			origin: OriginFor<T>,
 			relayer_id_at_bridged_chain: T::InboundRelayer,
 			proof: MessagesProofOf<T, I>,
 			messages_count: u32,
@@ -467,8 +380,8 @@ decl_module! {
 			// why do we need to know the weight of this (`receive_messages_proof`) call? Because
 			// we may want to return some funds for not-dispatching (or partially dispatching) some
 			// messages to the call origin (relayer). And this is done by returning actual weight
-			// from the call. But we only know dispatch weight of every messages. So to refund relayer
-			// because we have not dispatched Message, we need to:
+			// from the call. But we only know dispatch weight of every messages. So to refund
+			// relayer because we have not dispatched Message, we need to:
 			//
 			// ActualWeight = DeclaredWeight - Message.DispatchWeight
 			//
@@ -487,40 +400,20 @@ decl_module! {
 				T::InboundMessageFee,
 				T::InboundPayload,
 			>(proof, messages_count)
-				.map_err(|err| {
-					log::trace!(
-						target: "runtime::bridge-messages",
-						"Rejecting invalid messages proof: {:?}",
-						err,
-					);
-
-					Error::<T, I>::InvalidMessagesProof
-				})?;
-
-			// verify that relayer is paying actual dispatch weight
-			let actual_dispatch_weight: Weight = messages
-				.values()
-				.map(|lane_messages| lane_messages
-					.messages
-					.iter()
-					.map(T::MessageDispatch::dispatch_weight)
-					.fold(0, |sum, weight| sum.saturating_add(&weight))
-				)
-				.fold(0, |sum, weight| sum.saturating_add(weight));
-			if dispatch_weight < actual_dispatch_weight {
+			.map_err(|err| {
 				log::trace!(
 					target: "runtime::bridge-messages",
-					"Rejecting messages proof because of dispatch weight mismatch: declared={}, expected={}",
-					dispatch_weight,
-					actual_dispatch_weight,
+					"Rejecting invalid messages proof: {:?}",
+					err,
 				);
 
-				return Err(Error::<T, I>::InvalidMessagesDispatchWeight.into());
-			}
+				Error::<T, I>::InvalidMessagesProof
+			})?;
 
 			// dispatch messages and (optionally) update lane(s) state(s)
 			let mut total_messages = 0;
 			let mut valid_messages = 0;
+			let mut dispatch_weight_left = dispatch_weight;
 			for (lane_id, lane_data) in messages {
 				let mut lane = inbound_lane::<T, I>(lane_id);
 
@@ -539,8 +432,22 @@ decl_module! {
 				for message in lane_data.messages {
 					debug_assert_eq!(message.key.lane_id, lane_id);
 
-					total_messages += 1;
+					// ensure that relayer has declared enough weight for dispatching next message
+					// on this lane. We can't dispatch lane messages out-of-order, so if declared
+					// weight is not enough, let's move to next lane
 					let dispatch_weight = T::MessageDispatch::dispatch_weight(&message);
+					if dispatch_weight > dispatch_weight_left {
+						log::trace!(
+							target: "runtime::bridge-messages",
+							"Cannot dispatch any more messages on lane {:?}. Weight: declared={}, left={}",
+							lane_id,
+							dispatch_weight,
+							dispatch_weight_left,
+						);
+						break
+					}
+					total_messages += 1;
+
 					let receival_result = lane.receive_message::<T::MessageDispatch, T::AccountId>(
 						&relayer_id_at_bridged_chain,
 						&relayer_id_at_this_chain,
@@ -557,24 +464,28 @@ decl_module! {
 					let (unspent_weight, refund_pay_dispatch_fee) = match receival_result {
 						ReceivalResult::Dispatched(dispatch_result) => {
 							valid_messages += 1;
-							(dispatch_result.unspent_weight, !dispatch_result.dispatch_fee_paid_during_dispatch)
+							(
+								dispatch_result.unspent_weight,
+								!dispatch_result.dispatch_fee_paid_during_dispatch,
+							)
 						},
-						ReceivalResult::InvalidNonce
-							| ReceivalResult::TooManyUnrewardedRelayers
-							| ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true),
+						ReceivalResult::InvalidNonce |
+						ReceivalResult::TooManyUnrewardedRelayers |
+						ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true),
 					};
-					actual_weight = actual_weight
-						.saturating_sub(sp_std::cmp::min(unspent_weight, dispatch_weight))
-						.saturating_sub(
-							// delivery call weight formula assumes that the fee is paid at
-							// this (target) chain. If the message is prepaid at the source
-							// chain, let's refund relayer with this extra cost.
-							if refund_pay_dispatch_fee {
-								T::WeightInfo::pay_inbound_dispatch_fee_overhead()
-							} else {
-								0
-							}
-						);
+
+					let unspent_weight = sp_std::cmp::min(unspent_weight, dispatch_weight);
+					dispatch_weight_left -= dispatch_weight - unspent_weight;
+					actual_weight = actual_weight.saturating_sub(unspent_weight).saturating_sub(
+						// delivery call weight formula assumes that the fee is paid at
+						// this (target) chain. If the message is prepaid at the source
+						// chain, let's refund relayer with this extra cost.
+						if refund_pay_dispatch_fee {
+							T::WeightInfo::pay_inbound_dispatch_fee_overhead()
+						} else {
+							0
+						},
+					);
 				}
 			}
 
@@ -587,48 +498,86 @@ decl_module! {
 				declared_weight,
 			);
 
-			Ok(PostDispatchInfo {
-				actual_weight: Some(actual_weight),
-				pays_fee: Pays::Yes,
-			})
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
 
 		/// Receive messages delivery proof from bridged chain.
-		#[weight = T::WeightInfo::receive_messages_delivery_proof_weight(proof, relayers_state)]
+		#[pallet::weight(T::WeightInfo::receive_messages_delivery_proof_weight(
+			proof,
+			relayers_state,
+			T::DbWeight::get(),
+		))]
 		pub fn receive_messages_delivery_proof(
-			origin,
+			origin: OriginFor<T>,
 			proof: MessagesDeliveryProofOf<T, I>,
 			relayers_state: UnrewardedRelayersState,
-		) -> DispatchResult {
+		) -> DispatchResultWithPostInfo {
 			ensure_not_halted::<T, I>()?;
 
+			// why do we need to know the weight of this (`receive_messages_delivery_proof`) call?
+			// Because we may want to return some funds for messages that are not processed by the
+			// delivery callback, or if their actual processing weight is less than accounted by
+			// weight formula. So to refund relayer, we need to:
+			//
+			// ActualWeight = DeclaredWeight - UnspentCallbackWeight
+			//
+			// The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible
+			// to get pre-computed value (and it has been already computed by the executive).
+			let single_message_callback_overhead =
+				T::WeightInfo::single_message_callback_overhead(T::DbWeight::get());
+			let declared_weight = T::WeightInfo::receive_messages_delivery_proof_weight(
+				&proof,
+				&relayers_state,
+				T::DbWeight::get(),
+			);
+			let mut actual_weight = declared_weight;
+
 			let confirmation_relayer = ensure_signed(origin)?;
-			let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof).map_err(|err| {
-				log::trace!(
-					target: "runtime::bridge-messages",
-					"Rejecting invalid messages delivery proof: {:?}",
-					err,
-				);
+			let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof)
+				.map_err(|err| {
+					log::trace!(
+						target: "runtime::bridge-messages",
+						"Rejecting invalid messages delivery proof: {:?}",
+						err,
+					);
 
-				Error::<T, I>::InvalidMessagesDeliveryProof
-			})?;
+					Error::<T, I>::InvalidMessagesDeliveryProof
+				})?;
 
 			// verify that the relayer has declared correct `lane_data::relayers` state
-			// (we only care about total number of entries and messages, because this affects call weight)
+			// (we only care about total number of entries and messages, because this affects call
+			// weight)
 			ensure!(
-				total_unrewarded_messages(&lane_data.relayers)
-					.unwrap_or(MessageNonce::MAX) == relayers_state.total_messages
-					&& lane_data.relayers.len() as MessageNonce == relayers_state.unrewarded_relayer_entries,
+				total_unrewarded_messages(&lane_data.relayers).unwrap_or(MessageNonce::MAX) ==
+					relayers_state.total_messages &&
+					lane_data.relayers.len() as MessageNonce ==
+						relayers_state.unrewarded_relayer_entries,
 				Error::<T, I>::InvalidUnrewardedRelayersState
 			);
 
 			// mark messages as delivered
 			let mut lane = outbound_lane::<T, I>(lane_id);
-			let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new();
 			let last_delivered_nonce = lane_data.last_delivered_nonce();
-			let confirmed_messages = match lane.confirm_delivery(last_delivered_nonce, &lane_data.relayers) {
-				ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => Some(confirmed_messages),
+			let confirmed_messages = match lane.confirm_delivery(
+				relayers_state.total_messages,
+				last_delivered_nonce,
+				&lane_data.relayers,
+			) {
+				ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) =>
+					Some(confirmed_messages),
 				ReceivalConfirmationResult::NoNewConfirmations => None,
+				ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(
+					to_confirm_messages_count,
+				) => {
+					log::trace!(
+						target: "runtime::bridge-messages",
+						"Messages delivery proof contains too many messages to confirm: {} vs declared {}",
+						to_confirm_messages_count,
+						relayers_state.total_messages,
+					);
+
+					fail!(Error::<T, I>::TryingToConfirmMoreMessagesThanExpected);
+				},
 				error => {
 					log::trace!(
 						target: "runtime::bridge-messages",
@@ -639,40 +588,53 @@ decl_module! {
 					fail!(Error::<T, I>::InvalidUnrewardedRelayers);
 				},
 			};
+
 			if let Some(confirmed_messages) = confirmed_messages {
 				// handle messages delivery confirmation
-				T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages);
+				let preliminary_callback_overhead =
+					relayers_state.total_messages.saturating_mul(single_message_callback_overhead);
+				let actual_callback_weight =
+					T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages);
+				match preliminary_callback_overhead.checked_sub(actual_callback_weight) {
+					Some(difference) if difference == 0 => (),
+					Some(difference) => {
+						log::trace!(
+							target: "runtime::bridge-messages",
+							"T::OnDeliveryConfirmed callback has spent less weight than expected. Refunding: \
+							{} - {} = {}",
+							preliminary_callback_overhead,
+							actual_callback_weight,
+							difference,
+						);
+						actual_weight = actual_weight.saturating_sub(difference);
+					},
+					None => {
+						debug_assert!(
+							false,
+							"T::OnDeliveryConfirmed callback consumed too much weight."
+						);
+						log::error!(
+							target: "runtime::bridge-messages",
+							"T::OnDeliveryConfirmed callback has spent more weight that it is allowed to: \
+							{} vs {}",
+							preliminary_callback_overhead,
+							actual_callback_weight,
+						);
+					},
+				}
 
 				// emit 'delivered' event
 				let received_range = confirmed_messages.begin..=confirmed_messages.end;
-				Self::deposit_event(RawEvent::MessagesDelivered(lane_id, confirmed_messages));
-
-				// remember to reward relayers that have delivered messages
-				// this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain
-				for entry in lane_data.relayers {
-					let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start());
-					let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end());
-
-					// loop won't proceed if current entry is ahead of received range (begin > end).
-					// this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain
-					let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default();
-					for nonce in nonce_begin..nonce_end + 1 {
-						let message_data = OutboundMessages::<T, I>::get(MessageKey {
-							lane_id,
-							nonce,
-						}).expect("message was just confirmed; we never prune unconfirmed messages; qed");
-						relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee);
-						relayer_reward.messages += 1;
-					}
-				}
-			}
+				Self::deposit_event(Event::MessagesDelivered(lane_id, confirmed_messages));
 
-			// if some new messages have been confirmed, reward relayers
-			if !relayers_rewards.is_empty() {
-				let relayer_fund_account = Self::relayer_fund_account_id();
+				// if some new messages have been confirmed, reward relayers
+				let relayer_fund_account =
+					relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>();
 				<T as Config<I>>::MessageDeliveryAndDispatchPayment::pay_relayers_rewards(
+					lane_id,
+					lane_data.relayers,
 					&confirmation_relayer,
-					relayers_rewards,
+					&received_range,
 					&relayer_fund_account,
 				);
 			}
@@ -684,124 +646,359 @@ decl_module! {
 				lane_id,
 			);
 
-			Ok(())
+			Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes })
 		}
 	}
-}
 
-impl<T: Config<I>, I: Instance> Pallet<T, I> {
-	/// Get stored data of the outbound message with given nonce.
-	pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option<MessageData<T::OutboundMessageFee>> {
-		OutboundMessages::<T, I>::get(MessageKey { lane_id: lane, nonce })
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
+		/// Pallet parameter has been updated.
+		ParameterUpdated(T::Parameter),
+		/// Message has been accepted and is waiting to be delivered.
+		MessageAccepted(LaneId, MessageNonce),
+		/// Messages in the inclusive range have been delivered to the bridged chain.
+		MessagesDelivered(LaneId, DeliveredMessages),
 	}
 
-	/// Get nonce of latest generated message at given outbound lane.
-	pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce {
-		OutboundLanes::<I>::get(&lane).latest_generated_nonce
+	#[pallet::error]
+	pub enum Error<T, I = ()> {
+		/// All pallet operations are halted.
+		Halted,
+		/// Message has been treated as invalid by chain verifier.
+		MessageRejectedByChainVerifier,
+		/// Message has been treated as invalid by lane verifier.
+		MessageRejectedByLaneVerifier,
+		/// Submitter has failed to pay fee for delivering and dispatching messages.
+		FailedToWithdrawMessageFee,
+		/// The transaction brings too many messages.
+		TooManyMessagesInTheProof,
+		/// Invalid messages has been submitted.
+		InvalidMessagesProof,
+		/// Invalid messages delivery proof has been submitted.
+		InvalidMessagesDeliveryProof,
+		/// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane).
+		InvalidUnrewardedRelayers,
+		/// The relayer has declared invalid unrewarded relayers state in the
+		/// `receive_messages_delivery_proof` call.
+		InvalidUnrewardedRelayersState,
+		/// The message someone is trying to work with (i.e. increase fee) is already-delivered.
+		MessageIsAlreadyDelivered,
+		/// The message someone is trying to work with (i.e. increase fee) is not yet sent.
+		MessageIsNotYetSent,
+		/// The number of actually confirmed messages is going to be larger than the number of
+		/// messages in the proof. This may mean that this or bridged chain storage is corrupted.
+		TryingToConfirmMoreMessagesThanExpected,
 	}
 
-	/// Get nonce of latest confirmed message at given outbound lane.
-	pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce {
-		OutboundLanes::<I>::get(&lane).latest_received_nonce
+	/// Optional pallet owner.
+	///
+	/// Pallet owner has a right to halt all pallet operations and then resume it. If it is
+	/// `None`, then there are no direct ways to halt/resume pallet operations, but other
+	/// runtime methods may still be used to do that (i.e. democracy::referendum to update halt
+	/// flag directly or call the `halt_operations`).
+	#[pallet::storage]
+	#[pallet::getter(fn module_owner)]
+	pub type PalletOwner<T: Config<I>, I: 'static = ()> = StorageValue<_, T::AccountId>;
+
+	/// The current operating mode of the pallet.
+	///
+	/// Depending on the mode either all, some, or no transactions will be allowed.
+	#[pallet::storage]
+	#[pallet::getter(fn operating_mode)]
+	pub type PalletOperatingMode<T: Config<I>, I: 'static = ()> =
+		StorageValue<_, OperatingMode, ValueQuery>;
+
+	/// Map of lane id => inbound lane data.
+	#[pallet::storage]
+	pub type InboundLanes<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Blake2_128Concat, LaneId, InboundLaneData<T::InboundRelayer>, ValueQuery>;
+
+	/// Map of lane id => outbound lane data.
+	#[pallet::storage]
+	pub type OutboundLanes<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Blake2_128Concat, LaneId, OutboundLaneData, ValueQuery>;
+
+	/// All queued outbound messages.
+	#[pallet::storage]
+	pub type OutboundMessages<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Blake2_128Concat, MessageKey, MessageData<T::OutboundMessageFee>>;
+
+	#[pallet::genesis_config]
+	pub struct GenesisConfig<T: Config<I>, I: 'static = ()> {
+		/// Initial pallet operating mode.
+		pub operating_mode: OperatingMode,
+		/// Initial pallet owner.
+		pub owner: Option<T::AccountId>,
+		/// Dummy marker.
+		pub phantom: sp_std::marker::PhantomData<I>,
+	}
+
+	#[cfg(feature = "std")]
+	impl<T: Config<I>, I: 'static> Default for GenesisConfig<T, I> {
+		fn default() -> Self {
+			Self {
+				operating_mode: Default::default(),
+				owner: Default::default(),
+				phantom: Default::default(),
+			}
+		}
 	}
 
-	/// Get nonce of latest received message at given inbound lane.
-	pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce {
-		InboundLanes::<T, I>::get(&lane).last_delivered_nonce()
+	#[pallet::genesis_build]
+	impl<T: Config<I>, I: 'static> GenesisBuild<T, I> for GenesisConfig<T, I> {
+		fn build(&self) {
+			PalletOperatingMode::<T, I>::put(&self.operating_mode);
+			if let Some(ref owner) = self.owner {
+				PalletOwner::<T, I>::put(owner);
+			}
+		}
 	}
 
-	/// Get nonce of latest confirmed message at given inbound lane.
-	pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce {
-		InboundLanes::<T, I>::get(&lane).last_confirmed_nonce
-	}
+	impl<T: Config<I>, I: 'static> Pallet<T, I> {
+		/// Get stored data of the outbound message with given nonce.
+		pub fn outbound_message_data(
+			lane: LaneId,
+			nonce: MessageNonce,
+		) -> Option<MessageData<T::OutboundMessageFee>> {
+			OutboundMessages::<T, I>::get(MessageKey { lane_id: lane, nonce })
+		}
 
-	/// Get state of unrewarded relayers set.
-	pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> bp_messages::UnrewardedRelayersState {
-		let relayers = InboundLanes::<T, I>::get(&lane).relayers;
-		bp_messages::UnrewardedRelayersState {
-			unrewarded_relayer_entries: relayers.len() as _,
-			messages_in_oldest_entry: relayers
-				.front()
-				.map(|entry| 1 + entry.messages.end - entry.messages.begin)
-				.unwrap_or(0),
-			total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX),
+		/// Get nonce of the latest generated message at given outbound lane.
+		pub fn outbound_latest_generated_nonce(lane: LaneId) -> MessageNonce {
+			OutboundLanes::<T, I>::get(&lane).latest_generated_nonce
 		}
-	}
 
-	/// AccountId of the shared relayer fund account.
-	///
-	/// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending
-	/// on the implementation it can be used to store relayers rewards.
-	/// See [InstantCurrencyPayments] for a concrete implementation.
-	pub fn relayer_fund_account_id() -> T::AccountId {
-		use sp_runtime::traits::Convert;
-		let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID);
-		T::AccountIdConverter::convert(encoded_id)
+		/// Get nonce of the latest confirmed message at given outbound lane.
+		pub fn outbound_latest_received_nonce(lane: LaneId) -> MessageNonce {
+			OutboundLanes::<T, I>::get(&lane).latest_received_nonce
+		}
+
+		/// Get nonce of the latest received message at given inbound lane.
+		pub fn inbound_latest_received_nonce(lane: LaneId) -> MessageNonce {
+			InboundLanes::<T, I>::get(&lane).last_delivered_nonce()
+		}
+
+		/// Get nonce of the latest confirmed message at given inbound lane.
+		pub fn inbound_latest_confirmed_nonce(lane: LaneId) -> MessageNonce {
+			InboundLanes::<T, I>::get(&lane).last_confirmed_nonce
+		}
+
+		/// Get state of unrewarded relayers set.
+		pub fn inbound_unrewarded_relayers_state(
+			lane: bp_messages::LaneId,
+		) -> bp_messages::UnrewardedRelayersState {
+			let relayers = InboundLanes::<T, I>::get(&lane).relayers;
+			bp_messages::UnrewardedRelayersState {
+				unrewarded_relayer_entries: relayers.len() as _,
+				messages_in_oldest_entry: relayers
+					.front()
+					.map(|entry| 1 + entry.messages.end - entry.messages.begin)
+					.unwrap_or(0),
+				total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX),
+			}
+		}
 	}
 }
 
 /// Getting storage keys for messages and lanes states. These keys are normally used when building
 /// messages and lanes states proofs.
-///
-/// Keep in mind that all functions in this module are **NOT** using passed `T` argument, so any
-/// runtime can be passed. E.g. if you're verifying proof from Runtime1 in Runtime2, you only have
-/// access to Runtime2 and you may pass it to the functions, where required. This is because our
-/// maps are not using any Runtime-specific data in the keys.
-///
-/// On the other side, passing correct instance is required. So if proof has been crafted by the
-/// Instance1, you should verify it using Instance1. This is inconvenient if you're using different
-/// instances on different sides of the bridge. I.e. in Runtime1 it is Instance2, but on Runtime2
-/// it is Instance42. But there's no other way, but to craft this key manually (which is what I'm
-/// trying to avoid here) - by using strings like "Instance2", "OutboundMessages", etc.
 pub mod storage_keys {
 	use super::*;
-	use frame_support::{traits::Instance, StorageHasher};
 	use sp_core::storage::StorageKey;
 
 	/// Storage key of the outbound message in the runtime storage.
-	pub fn message_key<I: Instance>(lane: &LaneId, nonce: MessageNonce) -> StorageKey {
-		storage_map_final_key::<I>("OutboundMessages", &MessageKey { lane_id: *lane, nonce }.encode())
+	pub fn message_key(pallet_prefix: &str, lane: &LaneId, nonce: MessageNonce) -> StorageKey {
+		bp_runtime::storage_map_final_key_blake2_128concat(
+			pallet_prefix,
+			"OutboundMessages",
+			&MessageKey { lane_id: *lane, nonce }.encode(),
+		)
 	}
 
 	/// Storage key of the outbound message lane state in the runtime storage.
-	pub fn outbound_lane_data_key<I: Instance>(lane: &LaneId) -> StorageKey {
-		storage_map_final_key::<I>("OutboundLanes", lane)
+	pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey {
+		bp_runtime::storage_map_final_key_blake2_128concat(pallet_prefix, "OutboundLanes", lane)
 	}
 
 	/// Storage key of the inbound message lane state in the runtime storage.
-	pub fn inbound_lane_data_key<I: Instance>(lane: &LaneId) -> StorageKey {
-		storage_map_final_key::<I>("InboundLanes", lane)
+	pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey {
+		bp_runtime::storage_map_final_key_blake2_128concat(pallet_prefix, "InboundLanes", lane)
 	}
+}
 
-	/// This is a copypaste of the `frame_support::storage::generator::StorageMap::storage_map_final_key`.
-	fn storage_map_final_key<I: Instance>(map_name: &str, key: &[u8]) -> StorageKey {
-		let module_prefix_hashed = frame_support::Twox128::hash(I::PREFIX.as_bytes());
-		let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes());
-		let key_hashed = frame_support::Blake2_128Concat::hash(key);
+/// AccountId of the shared relayer fund account.
+///
+/// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending
+/// on the implementation it can be used to store relayers rewards.
+/// See [`InstantCurrencyPayments`] for a concrete implementation.
+pub fn relayer_fund_account_id<AccountId, AccountIdConverter: Convert<H256, AccountId>>(
+) -> AccountId {
+	let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID);
+	AccountIdConverter::convert(encoded_id)
+}
 
-		let mut final_key =
-			Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len());
+impl<T, I>
+	bp_messages::source_chain::MessagesBridge<
+		T::AccountId,
+		T::OutboundMessageFee,
+		T::OutboundPayload,
+	> for Pallet<T, I>
+where
+	T: Config<I>,
+	I: 'static,
+{
+	type Error = sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>;
+
+	fn send_message(
+		sender: bp_messages::source_chain::Sender<T::AccountId>,
+		lane: LaneId,
+		message: T::OutboundPayload,
+		delivery_and_dispatch_fee: T::OutboundMessageFee,
+	) -> Result<SendMessageArtifacts, Self::Error> {
+		crate::send_message::<T, I>(sender, lane, message, delivery_and_dispatch_fee)
+	}
+}
 
-		final_key.extend_from_slice(&module_prefix_hashed[..]);
-		final_key.extend_from_slice(&storage_prefix_hashed[..]);
-		final_key.extend_from_slice(key_hashed.as_ref());
+/// Function that actually sends message.
+fn send_message<T: Config<I>, I: 'static>(
+	submitter: bp_messages::source_chain::Sender<T::AccountId>,
+	lane_id: LaneId,
+	payload: T::OutboundPayload,
+	delivery_and_dispatch_fee: T::OutboundMessageFee,
+) -> sp_std::result::Result<
+	SendMessageArtifacts,
+	sp_runtime::DispatchErrorWithPostInfo<PostDispatchInfo>,
+> {
+	ensure_normal_operating_mode::<T, I>()?;
+
+	// initially, actual (post-dispatch) weight is equal to pre-dispatch weight
+	let mut actual_weight = T::WeightInfo::send_message_weight(&payload, T::DbWeight::get());
+
+	// let's first check if message can be delivered to target chain
+	T::TargetHeaderChain::verify_message(&payload).map_err(|err| {
+		log::trace!(
+			target: "runtime::bridge-messages",
+			"Message to lane {:?} is rejected by target chain: {:?}",
+			lane_id,
+			err,
+		);
+
+		Error::<T, I>::MessageRejectedByChainVerifier
+	})?;
+
+	// now let's enforce any additional lane rules
+	let mut lane = outbound_lane::<T, I>(lane_id);
+	T::LaneMessageVerifier::verify_message(
+		&submitter,
+		&delivery_and_dispatch_fee,
+		&lane_id,
+		&lane.data(),
+		&payload,
+	)
+	.map_err(|err| {
+		log::trace!(
+			target: "runtime::bridge-messages",
+			"Message to lane {:?} is rejected by lane verifier: {:?}",
+			lane_id,
+			err,
+		);
 
-		StorageKey(final_key)
+		Error::<T, I>::MessageRejectedByLaneVerifier
+	})?;
+
+	// let's withdraw delivery and dispatch fee from submitter
+	T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee(
+		&submitter,
+		&delivery_and_dispatch_fee,
+		&relayer_fund_account_id::<T::AccountId, T::AccountIdConverter>(),
+	)
+	.map_err(|err| {
+		log::trace!(
+			target: "runtime::bridge-messages",
+			"Message to lane {:?} is rejected because submitter {:?} is unable to pay fee {:?}: {:?}",
+			lane_id,
+			submitter,
+			delivery_and_dispatch_fee,
+			err,
+		);
+
+		Error::<T, I>::FailedToWithdrawMessageFee
+	})?;
+
+	// finally, save message in outbound storage and emit event
+	let encoded_payload = payload.encode();
+	let encoded_payload_len = encoded_payload.len();
+	let nonce =
+		lane.send_message(MessageData { payload: encoded_payload, fee: delivery_and_dispatch_fee });
+	// Guaranteed to be called outside only when the message is accepted.
+	// We assume that the maximum weight call back used is `single_message_callback_overhead`, so do
+	// not perform complex db operation in callback. If you want to, put these magic logic in
+	// outside pallet and control the weight there.
+	let single_message_callback_overhead =
+		T::WeightInfo::single_message_callback_overhead(T::DbWeight::get());
+	let actual_callback_weight = T::OnMessageAccepted::on_messages_accepted(&lane_id, &nonce);
+	match single_message_callback_overhead.checked_sub(actual_callback_weight) {
+		Some(difference) if difference == 0 => (),
+		Some(difference) => {
+			log::trace!(
+				target: "runtime::bridge-messages",
+				"T::OnMessageAccepted callback has spent less weight than expected. Refunding: \
+				{} - {} = {}",
+				single_message_callback_overhead,
+				actual_callback_weight,
+				difference,
+			);
+			actual_weight = actual_weight.saturating_sub(difference);
+		},
+		None => {
+			debug_assert!(false, "T::OnMessageAccepted callback consumed too much weight.");
+			log::error!(
+				target: "runtime::bridge-messages",
+				"T::OnMessageAccepted callback has spent more weight that it is allowed to: \
+				{} vs {}",
+				single_message_callback_overhead,
+				actual_callback_weight,
+			);
+		},
 	}
+
+	// message sender pays for pruning at most `MaxMessagesToPruneAtOnce` messages
+	// the cost of pruning every message is roughly single db write
+	// => lets refund sender if less than `MaxMessagesToPruneAtOnce` messages pruned
+	let max_messages_to_prune = T::MaxMessagesToPruneAtOnce::get();
+	let pruned_messages = lane.prune_messages(max_messages_to_prune);
+	if let Some(extra_messages) = max_messages_to_prune.checked_sub(pruned_messages) {
+		actual_weight = actual_weight.saturating_sub(T::DbWeight::get().writes(extra_messages));
+	}
+
+	log::trace!(
+		target: "runtime::bridge-messages",
+		"Accepted message {} to lane {:?}. Message size: {:?}",
+		nonce,
+		lane_id,
+		encoded_payload_len,
+	);
+
+	Pallet::<T, I>::deposit_event(Event::MessageAccepted(lane_id, nonce));
+
+	Ok(SendMessageArtifacts { nonce, weight: actual_weight })
 }
 
 /// Ensure that the origin is either root, or `PalletOwner`.
-fn ensure_owner_or_root<T: Config<I>, I: Instance>(origin: T::Origin) -> Result<(), BadOrigin> {
+fn ensure_owner_or_root<T: Config<I>, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> {
 	match origin.into() {
 		Ok(RawOrigin::Root) => Ok(()),
-		Ok(RawOrigin::Signed(ref signer)) if Some(signer) == Pallet::<T, I>::module_owner().as_ref() => Ok(()),
+		Ok(RawOrigin::Signed(ref signer))
+			if Some(signer) == Pallet::<T, I>::module_owner().as_ref() =>
+			Ok(()),
 		_ => Err(BadOrigin),
 	}
 }
 
 /// Ensure that the pallet is in normal operational mode.
-fn ensure_normal_operating_mode<T: Config<I>, I: Instance>() -> Result<(), Error<T, I>> {
-	if PalletOperatingMode::<I>::get() != OperatingMode::Normal {
+fn ensure_normal_operating_mode<T: Config<I>, I: 'static>() -> Result<(), Error<T, I>> {
+	if PalletOperatingMode::<T, I>::get() != OperatingMode::Normal {
 		Err(Error::<T, I>::Halted)
 	} else {
 		Ok(())
@@ -809,8 +1006,8 @@ fn ensure_normal_operating_mode<T: Config<I>, I: Instance>() -> Result<(), Error
 }
 
 /// Ensure that the pallet is not halted.
-fn ensure_not_halted<T: Config<I>, I: Instance>() -> Result<(), Error<T, I>> {
-	if PalletOperatingMode::<I>::get() == OperatingMode::Halted {
+fn ensure_not_halted<T: Config<I>, I: 'static>() -> Result<(), Error<T, I>> {
+	if PalletOperatingMode::<T, I>::get() == OperatingMode::Halted {
 		Err(Error::<T, I>::Halted)
 	} else {
 		Ok(())
@@ -818,12 +1015,16 @@ fn ensure_not_halted<T: Config<I>, I: Instance>() -> Result<(), Error<T, I>> {
 }
 
 /// Creates new inbound lane object, backed by runtime storage.
-fn inbound_lane<T: Config<I>, I: Instance>(lane_id: LaneId) -> InboundLane<RuntimeInboundLaneStorage<T, I>> {
+fn inbound_lane<T: Config<I>, I: 'static>(
+	lane_id: LaneId,
+) -> InboundLane<RuntimeInboundLaneStorage<T, I>> {
 	InboundLane::new(inbound_lane_storage::<T, I>(lane_id))
 }
 
 /// Creates new runtime inbound lane storage.
-fn inbound_lane_storage<T: Config<I>, I: Instance>(lane_id: LaneId) -> RuntimeInboundLaneStorage<T, I> {
+fn inbound_lane_storage<T: Config<I>, I: 'static>(
+	lane_id: LaneId,
+) -> RuntimeInboundLaneStorage<T, I> {
 	RuntimeInboundLaneStorage {
 		lane_id,
 		cached_data: RefCell::new(None),
@@ -832,21 +1033,20 @@ fn inbound_lane_storage<T: Config<I>, I: Instance>(lane_id: LaneId) -> RuntimeIn
 }
 
 /// Creates new outbound lane object, backed by runtime storage.
-fn outbound_lane<T: Config<I>, I: Instance>(lane_id: LaneId) -> OutboundLane<RuntimeOutboundLaneStorage<T, I>> {
-	OutboundLane::new(RuntimeOutboundLaneStorage {
-		lane_id,
-		_phantom: Default::default(),
-	})
+fn outbound_lane<T: Config<I>, I: 'static>(
+	lane_id: LaneId,
+) -> OutboundLane<RuntimeOutboundLaneStorage<T, I>> {
+	OutboundLane::new(RuntimeOutboundLaneStorage { lane_id, _phantom: Default::default() })
 }
 
 /// Runtime inbound lane storage.
-struct RuntimeInboundLaneStorage<T: Config<I>, I = DefaultInstance> {
+struct RuntimeInboundLaneStorage<T: Config<I>, I: 'static = ()> {
 	lane_id: LaneId,
 	cached_data: RefCell<Option<InboundLaneData<T::InboundRelayer>>>,
 	_phantom: PhantomData<I>,
 }
 
-impl<T: Config<I>, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage<T, I> {
+impl<T: Config<I>, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage<T, I> {
 	type MessageFee = T::InboundMessageFee;
 	type Relayer = T::InboundRelayer;
 
@@ -872,7 +1072,7 @@ impl<T: Config<I>, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage
 						we have no recursive borrows; qed",
 				) = Some(data.clone());
 				data
-			}
+			},
 		}
 	}
 
@@ -886,12 +1086,12 @@ impl<T: Config<I>, I: Instance> InboundLaneStorage for RuntimeInboundLaneStorage
 }
 
 /// Runtime outbound lane storage.
-struct RuntimeOutboundLaneStorage<T, I = DefaultInstance> {
+struct RuntimeOutboundLaneStorage<T, I = ()> {
 	lane_id: LaneId,
 	_phantom: PhantomData<(T, I)>,
 }
 
-impl<T: Config<I>, I: Instance> OutboundLaneStorage for RuntimeOutboundLaneStorage<T, I> {
+impl<T: Config<I>, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorage<T, I> {
 	type MessageFee = T::OutboundMessageFee;
 
 	fn id(&self) -> LaneId {
@@ -899,36 +1099,28 @@ impl<T: Config<I>, I: Instance> OutboundLaneStorage for RuntimeOutboundLaneStora
 	}
 
 	fn data(&self) -> OutboundLaneData {
-		OutboundLanes::<I>::get(&self.lane_id)
+		OutboundLanes::<T, I>::get(&self.lane_id)
 	}
 
 	fn set_data(&mut self, data: OutboundLaneData) {
-		OutboundLanes::<I>::insert(&self.lane_id, data)
+		OutboundLanes::<T, I>::insert(&self.lane_id, data)
 	}
 
 	#[cfg(test)]
 	fn message(&self, nonce: &MessageNonce) -> Option<MessageData<T::OutboundMessageFee>> {
-		OutboundMessages::<T, I>::get(MessageKey {
-			lane_id: self.lane_id,
-			nonce: *nonce,
-		})
+		OutboundMessages::<T, I>::get(MessageKey { lane_id: self.lane_id, nonce: *nonce })
 	}
 
-	fn save_message(&mut self, nonce: MessageNonce, mesage_data: MessageData<T::OutboundMessageFee>) {
-		OutboundMessages::<T, I>::insert(
-			MessageKey {
-				lane_id: self.lane_id,
-				nonce,
-			},
-			mesage_data,
-		);
+	fn save_message(
+		&mut self,
+		nonce: MessageNonce,
+		mesage_data: MessageData<T::OutboundMessageFee>,
+	) {
+		OutboundMessages::<T, I>::insert(MessageKey { lane_id: self.lane_id, nonce }, mesage_data);
 	}
 
 	fn remove_message(&mut self, nonce: &MessageNonce) {
-		OutboundMessages::<T, I>::remove(MessageKey {
-			lane_id: self.lane_id,
-			nonce: *nonce,
-		});
+		OutboundMessages::<T, I>::remove(MessageKey { lane_id: self.lane_id, nonce: *nonce });
 	}
 }
 
@@ -961,12 +1153,13 @@ mod tests {
 	use super::*;
 	use crate::mock::{
 		message, message_payload, run_test, unrewarded_relayer, Event as TestEvent, Origin,
-		TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, TestMessagesProof,
-		TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID,
-		TEST_RELAYER_A, TEST_RELAYER_B,
+		TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter,
+		TestMessagesProof, TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2,
+		TestOnMessageAccepted, TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN,
+		REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B,
 	};
 	use bp_messages::{UnrewardedRelayer, UnrewardedRelayersState};
-	use frame_support::{assert_noop, assert_ok};
+	use frame_support::{assert_noop, assert_ok, weights::Weight};
 	use frame_system::{EventRecord, Pallet as System, Phase};
 	use hex_literal::hex;
 	use sp_runtime::DispatchError;
@@ -976,26 +1169,27 @@ mod tests {
 		System::<TestRuntime>::reset_events();
 	}
 
-	fn send_regular_message() {
+	fn send_regular_message() -> Weight {
 		get_ready_for_events();
 
-		let message_nonce = outbound_lane::<TestRuntime, DefaultInstance>(TEST_LANE_ID)
-			.data()
-			.latest_generated_nonce
-			+ 1;
-		assert_ok!(Pallet::<TestRuntime>::send_message(
+		let message_nonce =
+			outbound_lane::<TestRuntime, ()>(TEST_LANE_ID).data().latest_generated_nonce + 1;
+		let weight = Pallet::<TestRuntime>::send_message(
 			Origin::signed(1),
 			TEST_LANE_ID,
 			REGULAR_PAYLOAD,
 			REGULAR_PAYLOAD.declared_weight,
-		));
+		)
+		.expect("send_message has failed")
+		.actual_weight
+		.expect("send_message always returns Some");
 
 		// check event with assigned nonce
 		assert_eq!(
 			System::<TestRuntime>::events(),
 			vec![EventRecord {
 				phase: Phase::Initialization,
-				event: TestEvent::Messages(RawEvent::MessageAccepted(TEST_LANE_ID, message_nonce)),
+				event: TestEvent::Messages(Event::MessageAccepted(TEST_LANE_ID, message_nonce)),
 				topics: vec![],
 			}],
 		);
@@ -1005,6 +1199,8 @@ mod tests {
 			1,
 			REGULAR_PAYLOAD.declared_weight
 		));
+
+		weight
 	}
 
 	fn receive_messages_delivery_proof() {
@@ -1036,7 +1232,7 @@ mod tests {
 			System::<TestRuntime>::events(),
 			vec![EventRecord {
 				phase: Phase::Initialization,
-				event: TestEvent::Messages(RawEvent::MessagesDelivered(
+				event: TestEvent::Messages(Event::MessagesDelivered(
 					TEST_LANE_ID,
 					DeliveredMessages::new(1, true),
 				)),
@@ -1140,7 +1336,7 @@ mod tests {
 				System::<TestRuntime>::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: TestEvent::Messages(RawEvent::ParameterUpdated(parameter)),
+					event: TestEvent::Messages(Event::ParameterUpdated(parameter)),
 					topics: vec![],
 				}],
 			);
@@ -1164,7 +1360,7 @@ mod tests {
 				System::<TestRuntime>::events(),
 				vec![EventRecord {
 					phase: Phase::Initialization,
-					event: TestEvent::Messages(RawEvent::ParameterUpdated(parameter)),
+					event: TestEvent::Messages(Event::ParameterUpdated(parameter)),
 					topics: vec![],
 				}],
 			);
@@ -1202,7 +1398,8 @@ mod tests {
 
 			// 1:1 conversion that we use by default for testnets
 			let rialto_token = 1u64;
-			let rialto_token_in_millau_tokens = TokenConversionRate::get().saturating_mul_int(rialto_token);
+			let rialto_token_in_millau_tokens =
+				TokenConversionRate::get().saturating_mul_int(rialto_token);
 			assert_eq!(rialto_token_in_millau_tokens, 1);
 
 			// let's say conversion rate is 1:1.7
@@ -1225,7 +1422,7 @@ mod tests {
 			// send message first to be able to check that delivery_proof fails later
 			send_regular_message();
 
-			PalletOperatingMode::<DefaultInstance>::put(OperatingMode::Halted);
+			PalletOperatingMode::<TestRuntime, ()>::put(OperatingMode::Halted);
 
 			assert_noop!(
 				Pallet::<TestRuntime>::send_message(
@@ -1234,12 +1431,12 @@ mod tests {
 					REGULAR_PAYLOAD,
 					REGULAR_PAYLOAD.declared_weight,
 				),
-				Error::<TestRuntime, DefaultInstance>::Halted,
+				Error::<TestRuntime, ()>::Halted,
 			);
 
 			assert_noop!(
 				Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1,),
-				Error::<TestRuntime, DefaultInstance>::Halted,
+				Error::<TestRuntime, ()>::Halted,
 			);
 
 			assert_noop!(
@@ -1250,7 +1447,7 @@ mod tests {
 					1,
 					REGULAR_PAYLOAD.declared_weight,
 				),
-				Error::<TestRuntime, DefaultInstance>::Halted,
+				Error::<TestRuntime, ()>::Halted,
 			);
 
 			assert_noop!(
@@ -1260,12 +1457,18 @@ mod tests {
 						TEST_LANE_ID,
 						InboundLaneData {
 							last_confirmed_nonce: 1,
-							..Default::default()
+							relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
+								.into_iter()
+								.collect(),
 						},
 					))),
-					Default::default(),
+					UnrewardedRelayersState {
+						unrewarded_relayer_entries: 1,
+						messages_in_oldest_entry: 1,
+						total_messages: 1,
+					},
 				),
-				Error::<TestRuntime, DefaultInstance>::Halted,
+				Error::<TestRuntime, ()>::Halted,
 			);
 		});
 	}
@@ -1276,7 +1479,7 @@ mod tests {
 			// send message first to be able to check that delivery_proof fails later
 			send_regular_message();
 
-			PalletOperatingMode::<DefaultInstance>::put(OperatingMode::RejectingOutboundMessages);
+			PalletOperatingMode::<TestRuntime, ()>::put(OperatingMode::RejectingOutboundMessages);
 
 			assert_noop!(
 				Pallet::<TestRuntime>::send_message(
@@ -1285,7 +1488,7 @@ mod tests {
 					REGULAR_PAYLOAD,
 					REGULAR_PAYLOAD.declared_weight,
 				),
-				Error::<TestRuntime, DefaultInstance>::Halted,
+				Error::<TestRuntime, ()>::Halted,
 			);
 
 			assert_ok!(Pallet::<TestRuntime>::increase_message_fee(
@@ -1309,10 +1512,16 @@ mod tests {
 					TEST_LANE_ID,
 					InboundLaneData {
 						last_confirmed_nonce: 1,
-						..Default::default()
+						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
+							.into_iter()
+							.collect(),
 					},
 				))),
-				Default::default(),
+				UnrewardedRelayersState {
+					unrewarded_relayer_entries: 1,
+					messages_in_oldest_entry: 1,
+					total_messages: 1,
+				},
 			));
 		});
 	}
@@ -1335,7 +1544,7 @@ mod tests {
 					PAYLOAD_REJECTED_BY_TARGET_CHAIN,
 					PAYLOAD_REJECTED_BY_TARGET_CHAIN.declared_weight
 				),
-				Error::<TestRuntime, DefaultInstance>::MessageRejectedByChainVerifier,
+				Error::<TestRuntime, ()>::MessageRejectedByChainVerifier,
 			);
 		});
 	}
@@ -1345,8 +1554,13 @@ mod tests {
 		run_test(|| {
 			// messages with zero fee are rejected by lane verifier
 			assert_noop!(
-				Pallet::<TestRuntime>::send_message(Origin::signed(1), TEST_LANE_ID, REGULAR_PAYLOAD, 0),
-				Error::<TestRuntime, DefaultInstance>::MessageRejectedByLaneVerifier,
+				Pallet::<TestRuntime>::send_message(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					REGULAR_PAYLOAD,
+					0
+				),
+				Error::<TestRuntime, ()>::MessageRejectedByLaneVerifier,
 			);
 		});
 	}
@@ -1362,7 +1576,7 @@ mod tests {
 					REGULAR_PAYLOAD,
 					REGULAR_PAYLOAD.declared_weight
 				),
-				Error::<TestRuntime, DefaultInstance>::FailedToWithdrawMessageFee,
+				Error::<TestRuntime, ()>::FailedToWithdrawMessageFee,
 			);
 		});
 	}
@@ -1386,7 +1600,7 @@ mod tests {
 	fn receive_messages_proof_updates_confirmed_message_nonce() {
 		run_test(|| {
 			// say we have received 10 messages && last confirmed message is 8
-			InboundLanes::<TestRuntime, DefaultInstance>::insert(
+			InboundLanes::<TestRuntime, ()>::insert(
 				TEST_LANE_ID,
 				InboundLaneData {
 					last_confirmed_nonce: 8,
@@ -1408,11 +1622,10 @@ mod tests {
 			);
 
 			// message proof includes outbound lane state with latest confirmed message updated to 9
-			let mut message_proof: TestMessagesProof = Ok(vec![message(11, REGULAR_PAYLOAD)]).into();
-			message_proof.result.as_mut().unwrap()[0].1.lane_state = Some(OutboundLaneData {
-				latest_received_nonce: 9,
-				..Default::default()
-			});
+			let mut message_proof: TestMessagesProof =
+				Ok(vec![message(11, REGULAR_PAYLOAD)]).into();
+			message_proof.result.as_mut().unwrap()[0].1.lane_state =
+				Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() });
 
 			assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
 				Origin::signed(1),
@@ -1446,18 +1659,16 @@ mod tests {
 	}
 
 	#[test]
-	fn receive_messages_proof_rejects_invalid_dispatch_weight() {
+	fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() {
 		run_test(|| {
-			assert_noop!(
-				Pallet::<TestRuntime>::receive_messages_proof(
-					Origin::signed(1),
-					TEST_RELAYER_A,
-					Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
-					1,
-					REGULAR_PAYLOAD.declared_weight - 1,
-				),
-				Error::<TestRuntime, DefaultInstance>::InvalidMessagesDispatchWeight,
-			);
+			assert_ok!(Pallet::<TestRuntime>::receive_messages_proof(
+				Origin::signed(1),
+				TEST_RELAYER_A,
+				Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
+				1,
+				REGULAR_PAYLOAD.declared_weight - 1,
+			));
+			assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 0);
 		});
 	}
 
@@ -1465,14 +1676,14 @@ mod tests {
 	fn receive_messages_proof_rejects_invalid_proof() {
 		run_test(|| {
 			assert_noop!(
-				Pallet::<TestRuntime, DefaultInstance>::receive_messages_proof(
+				Pallet::<TestRuntime, ()>::receive_messages_proof(
 					Origin::signed(1),
 					TEST_RELAYER_A,
 					Err(()).into(),
 					1,
 					0,
 				),
-				Error::<TestRuntime, DefaultInstance>::InvalidMessagesProof,
+				Error::<TestRuntime, ()>::InvalidMessagesProof,
 			);
 		});
 	}
@@ -1481,14 +1692,14 @@ mod tests {
 	fn receive_messages_proof_rejects_proof_with_too_many_messages() {
 		run_test(|| {
 			assert_noop!(
-				Pallet::<TestRuntime, DefaultInstance>::receive_messages_proof(
+				Pallet::<TestRuntime, ()>::receive_messages_proof(
 					Origin::signed(1),
 					TEST_RELAYER_A,
 					Ok(vec![message(1, REGULAR_PAYLOAD)]).into(),
 					u32::MAX,
 					0,
 				),
-				Error::<TestRuntime, DefaultInstance>::TooManyMessagesInTheProof,
+				Error::<TestRuntime, ()>::TooManyMessagesInTheProof,
 			);
 		});
 	}
@@ -1500,7 +1711,7 @@ mod tests {
 			receive_messages_delivery_proof();
 
 			assert_eq!(
-				OutboundLanes::<DefaultInstance>::get(&TEST_LANE_ID).latest_received_nonce,
+				OutboundLanes::<TestRuntime, ()>::get(&TEST_LANE_ID).latest_received_nonce,
 				1,
 			);
 		});
@@ -1528,7 +1739,9 @@ mod tests {
 				TestMessagesDeliveryProof(Ok((
 					TEST_LANE_ID,
 					InboundLaneData {
-						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(),
+						relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)]
+							.into_iter()
+							.collect(),
 						..Default::default()
 					}
 				))),
@@ -1538,16 +1751,11 @@ mod tests {
 					..Default::default()
 				},
 			));
-			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_A,
-				1000
-			));
-			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_B,
-				2000
-			));
+			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000));
+			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000));
 
-			// this reports delivery of both message 1 and message 2 => reward is paid only to TEST_RELAYER_B
+			// this reports delivery of both message 1 and message 2 => reward is paid only to
+			// TEST_RELAYER_B
 			assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
 				Origin::signed(1),
 				TestMessagesDeliveryProof(Ok((
@@ -1568,14 +1776,8 @@ mod tests {
 					..Default::default()
 				},
 			));
-			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_A,
-				1000
-			));
-			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(
-				TEST_RELAYER_B,
-				2000
-			));
+			assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000));
+			assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000));
 		});
 	}
 
@@ -1588,7 +1790,7 @@ mod tests {
 					TestMessagesDeliveryProof(Err(())),
 					Default::default(),
 				),
-				Error::<TestRuntime, DefaultInstance>::InvalidMessagesDeliveryProof,
+				Error::<TestRuntime, ()>::InvalidMessagesDeliveryProof,
 			);
 		});
 	}
@@ -1596,7 +1798,7 @@ mod tests {
 	#[test]
 	fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() {
 		run_test(|| {
-			// when number of relayers entires is invalid
+			// when number of relayers entries is invalid
 			assert_noop!(
 				Pallet::<TestRuntime>::receive_messages_delivery_proof(
 					Origin::signed(1),
@@ -1618,7 +1820,7 @@ mod tests {
 						..Default::default()
 					},
 				),
-				Error::<TestRuntime, DefaultInstance>::InvalidUnrewardedRelayersState,
+				Error::<TestRuntime, ()>::InvalidUnrewardedRelayersState,
 			);
 
 			// when number of messages is invalid
@@ -1643,7 +1845,7 @@ mod tests {
 						..Default::default()
 					},
 				),
-				Error::<TestRuntime, DefaultInstance>::InvalidUnrewardedRelayersState,
+				Error::<TestRuntime, ()>::InvalidUnrewardedRelayersState,
 			);
 		});
 	}
@@ -1654,7 +1856,7 @@ mod tests {
 			let mut invalid_message = message(1, REGULAR_PAYLOAD);
 			invalid_message.data.payload = Vec::new();
 
-			assert_ok!(Pallet::<TestRuntime, DefaultInstance>::receive_messages_proof(
+			assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_proof(
 				Origin::signed(1),
 				TEST_RELAYER_A,
 				Ok(vec![invalid_message]).into(),
@@ -1662,10 +1864,7 @@ mod tests {
 				0, // weight may be zero in this case (all messages are improperly encoded)
 			));
 
-			assert_eq!(
-				InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(),
-				1,
-			);
+			assert_eq!(InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(), 1,);
 		});
 	}
 
@@ -1675,31 +1874,26 @@ mod tests {
 			let mut invalid_message = message(2, REGULAR_PAYLOAD);
 			invalid_message.data.payload = Vec::new();
 
-			assert_ok!(Pallet::<TestRuntime, DefaultInstance>::receive_messages_proof(
+			assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_proof(
 				Origin::signed(1),
 				TEST_RELAYER_A,
-				Ok(vec![
-					message(1, REGULAR_PAYLOAD),
-					invalid_message,
-					message(3, REGULAR_PAYLOAD),
-				])
+				Ok(
+					vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),]
+				)
 				.into(),
 				3,
 				REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight,
 			));
 
-			assert_eq!(
-				InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(),
-				3,
-			);
+			assert_eq!(InboundLanes::<TestRuntime>::get(&TEST_LANE_ID).last_delivered_nonce(), 3,);
 		});
 	}
 
 	#[test]
 	fn storage_message_key_computed_properly() {
-		// If this test fails, then something has been changed in module storage that is breaking all
-		// previously crafted messages proofs.
-		let storage_key = storage_keys::message_key::<DefaultInstance>(&*b"test", 42).0;
+		// If this test fails, then something has been changed in module storage that is breaking
+		// all previously crafted messages proofs.
+		let storage_key = storage_keys::message_key("BridgeMessages", &*b"test", 42).0;
 		assert_eq!(
 			storage_key,
 			hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(),
@@ -1710,9 +1904,9 @@ mod tests {
 
 	#[test]
 	fn outbound_lane_data_key_computed_properly() {
-		// If this test fails, then something has been changed in module storage that is breaking all
-		// previously crafted outbound lane state proofs.
-		let storage_key = storage_keys::outbound_lane_data_key::<DefaultInstance>(&*b"test").0;
+		// If this test fails, then something has been changed in module storage that is breaking
+		// all previously crafted outbound lane state proofs.
+		let storage_key = storage_keys::outbound_lane_data_key("BridgeMessages", &*b"test").0;
 		assert_eq!(
 			storage_key,
 			hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(),
@@ -1723,9 +1917,9 @@ mod tests {
 
 	#[test]
 	fn inbound_lane_data_key_computed_properly() {
-		// If this test fails, then something has been changed in module storage that is breaking all
-		// previously crafted inbound lane state proofs.
-		let storage_key = storage_keys::inbound_lane_data_key::<DefaultInstance>(&*b"test").0;
+		// If this test fails, then something has been changed in module storage that is breaking
+		// all previously crafted inbound lane state proofs.
+		let storage_key = storage_keys::inbound_lane_data_key("BridgeMessages", &*b"test").0;
 		assert_eq!(
 			storage_key,
 			hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(),
@@ -1739,19 +1933,17 @@ mod tests {
 		run_test(|| {
 			let message1 = message(1, message_payload(0, Weight::MAX / 2));
 			let message2 = message(2, message_payload(0, Weight::MAX / 2));
-			let message3 = message(2, message_payload(0, Weight::MAX / 2));
+			let message3 = message(3, message_payload(0, Weight::MAX / 2));
 
-			assert_noop!(
-				Pallet::<TestRuntime, DefaultInstance>::receive_messages_proof(
-					Origin::signed(1),
-					TEST_RELAYER_A,
-					// this may cause overflow if source chain storage is invalid
-					Ok(vec![message1, message2, message3]).into(),
-					3,
-					100,
-				),
-				Error::<TestRuntime, DefaultInstance>::InvalidMessagesDispatchWeight,
-			);
+			assert_ok!(Pallet::<TestRuntime, ()>::receive_messages_proof(
+				Origin::signed(1),
+				TEST_RELAYER_A,
+				// this may cause overflow if source chain storage is invalid
+				Ok(vec![message1, message2, message3]).into(),
+				3,
+				Weight::MAX,
+			));
+			assert_eq!(InboundLanes::<TestRuntime>::get(TEST_LANE_ID).last_delivered_nonce(), 2);
 		});
 	}
 
@@ -1762,8 +1954,13 @@ mod tests {
 			receive_messages_delivery_proof();
 
 			assert_noop!(
-				Pallet::<TestRuntime, DefaultInstance>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,),
-				Error::<TestRuntime, DefaultInstance>::MessageIsAlreadyDelivered,
+				Pallet::<TestRuntime, ()>::increase_message_fee(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					1,
+					100,
+				),
+				Error::<TestRuntime, ()>::MessageIsAlreadyDelivered,
 			);
 		});
 	}
@@ -1772,8 +1969,13 @@ mod tests {
 	fn increase_message_fee_fails_if_message_is_not_yet_sent() {
 		run_test(|| {
 			assert_noop!(
-				Pallet::<TestRuntime, DefaultInstance>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,),
-				Error::<TestRuntime, DefaultInstance>::MessageIsNotYetSent,
+				Pallet::<TestRuntime, ()>::increase_message_fee(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					1,
+					100,
+				),
+				Error::<TestRuntime, ()>::MessageIsNotYetSent,
 			);
 		});
 	}
@@ -1786,8 +1988,13 @@ mod tests {
 			TestMessageDeliveryAndDispatchPayment::reject_payments();
 
 			assert_noop!(
-				Pallet::<TestRuntime, DefaultInstance>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 100,),
-				Error::<TestRuntime, DefaultInstance>::FailedToWithdrawMessageFee,
+				Pallet::<TestRuntime, ()>::increase_message_fee(
+					Origin::signed(1),
+					TEST_LANE_ID,
+					1,
+					100,
+				),
+				Error::<TestRuntime, ()>::FailedToWithdrawMessageFee,
 			);
 		});
 	}
@@ -1797,7 +2004,7 @@ mod tests {
 		run_test(|| {
 			send_regular_message();
 
-			assert_ok!(Pallet::<TestRuntime, DefaultInstance>::increase_message_fee(
+			assert_ok!(Pallet::<TestRuntime, ()>::increase_message_fee(
 				Origin::signed(1),
 				TEST_LANE_ID,
 				1,
@@ -1820,11 +2027,12 @@ mod tests {
 				payload.dispatch_result.dispatch_fee_paid_during_dispatch = !is_prepaid;
 				let proof = Ok(vec![message(nonce, payload)]).into();
 				let messages_count = 1;
-				let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
-					&proof,
-					messages_count,
-					REGULAR_PAYLOAD.declared_weight,
-				);
+				let pre_dispatch_weight =
+					<TestRuntime as Config>::WeightInfo::receive_messages_proof_weight(
+						&proof,
+						messages_count,
+						REGULAR_PAYLOAD.declared_weight,
+					);
 				let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_proof(
 					Origin::signed(1),
 					TEST_RELAYER_A,
@@ -1848,7 +2056,8 @@ mod tests {
 			assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight);
 
 			// when dispatch is returning `unspent_weight > declared_weight`
-			let (pre, post) = submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false);
+			let (pre, post) =
+				submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false);
 			assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight);
 
 			// when there's no unspent weight
@@ -1923,10 +2132,231 @@ mod tests {
 			));
 
 			// ensure that both callbacks have been called twice: for 1+2, then for 3
-			crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2);
-			crate::mock::TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3);
-			crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2);
-			crate::mock::TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3);
+			TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2);
+			TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3);
+			TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2);
+			TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3);
+		});
+	}
+
+	fn confirm_3_messages_delivery() -> (Weight, Weight) {
+		send_regular_message();
+		send_regular_message();
+		send_regular_message();
+
+		let proof = TestMessagesDeliveryProof(Ok((
+			TEST_LANE_ID,
+			InboundLaneData {
+				last_confirmed_nonce: 0,
+				relayers: vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)].into_iter().collect(),
+			},
+		)));
+		let relayers_state = UnrewardedRelayersState {
+			unrewarded_relayer_entries: 1,
+			total_messages: 3,
+			..Default::default()
+		};
+		let pre_dispatch_weight =
+			<TestRuntime as Config>::WeightInfo::receive_messages_delivery_proof_weight(
+				&proof,
+				&relayers_state,
+				crate::mock::DbWeight::get(),
+			);
+		let post_dispatch_weight = Pallet::<TestRuntime>::receive_messages_delivery_proof(
+			Origin::signed(1),
+			proof,
+			relayers_state,
+		)
+		.expect("confirmation has failed")
+		.actual_weight
+		.expect("receive_messages_delivery_proof always returns Some");
+		(pre_dispatch_weight, post_dispatch_weight)
+	}
+
+	#[test]
+	fn receive_messages_delivery_proof_refunds_zero_weight() {
+		run_test(|| {
+			let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery();
+			assert_eq!(pre_dispatch_weight, post_dispatch_weight);
+		});
+	}
+
+	#[test]
+	fn receive_messages_delivery_proof_refunds_non_zero_weight() {
+		run_test(|| {
+			TestOnDeliveryConfirmed1::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().writes(1),
+			);
+
+			let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery();
+			assert_eq!(
+				pre_dispatch_weight.saturating_sub(post_dispatch_weight),
+				crate::mock::DbWeight::get().reads(1) * 3
+			);
+		});
+	}
+
+	#[test]
+	#[should_panic]
+	fn receive_messages_panics_in_debug_mode_if_callback_is_wrong() {
+		run_test(|| {
+			TestOnDeliveryConfirmed1::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().reads_writes(2, 2),
+			);
+			confirm_3_messages_delivery()
+		});
+	}
+
+	#[test]
+	fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected(
+	) {
+		run_test(|| {
+			// send message first to be able to check that delivery_proof fails later
+			send_regular_message();
+
+			// 1) InboundLaneData declares that the `last_confirmed_nonce` is 1;
+			// 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()`
+			//    returns `last_confirmed_nonce`;
+			// 3) it means that we're going to confirm delivery of messages 1..=1;
+			// 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and
+			//    numer of actually confirmed messages is `1`.
+			assert_noop!(
+				Pallet::<TestRuntime>::receive_messages_delivery_proof(
+					Origin::signed(1),
+					TestMessagesDeliveryProof(Ok((
+						TEST_LANE_ID,
+						InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() },
+					))),
+					UnrewardedRelayersState::default(),
+				),
+				Error::<TestRuntime, ()>::TryingToConfirmMoreMessagesThanExpected,
+			);
+		});
+	}
+
+	#[test]
+	fn increase_message_fee_weight_depends_on_message_size() {
+		run_test(|| {
+			let mut small_payload = message_payload(0, 100);
+			let mut large_payload = message_payload(1, 100);
+			small_payload.extra = vec![1; 100];
+			large_payload.extra = vec![2; 16_384];
+
+			assert_ok!(Pallet::<TestRuntime>::send_message(
+				Origin::signed(1),
+				TEST_LANE_ID,
+				small_payload,
+				100,
+			));
+			assert_ok!(Pallet::<TestRuntime>::send_message(
+				Origin::signed(1),
+				TEST_LANE_ID,
+				large_payload,
+				100,
+			));
+
+			let small_weight =
+				Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1)
+					.expect("increase_message_fee has failed")
+					.actual_weight
+					.expect("increase_message_fee always returns Some");
+
+			let large_weight =
+				Pallet::<TestRuntime>::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 2, 1)
+					.expect("increase_message_fee has failed")
+					.actual_weight
+					.expect("increase_message_fee always returns Some");
+
+			assert!(
+				large_weight > small_weight,
+				"Actual post-dispatch weigth for larger message {} must be larger than {} for small message",
+				large_weight,
+				small_weight,
+			);
+		});
+	}
+
+	#[test]
+	fn weight_is_refunded_for_messages_that_are_not_pruned() {
+		run_test(|| {
+			// send first MAX messages - no messages are pruned
+			let max_messages_to_prune = crate::mock::MaxMessagesToPruneAtOnce::get();
+			let when_zero_messages_are_pruned = send_regular_message();
+			let mut delivered_messages = DeliveredMessages::new(1, true);
+			for _ in 1..max_messages_to_prune {
+				assert_eq!(send_regular_message(), when_zero_messages_are_pruned);
+				delivered_messages.note_dispatched_message(true);
+			}
+
+			// confirm delivery of all sent messages
+			assert_ok!(Pallet::<TestRuntime>::receive_messages_delivery_proof(
+				Origin::signed(1),
+				TestMessagesDeliveryProof(Ok((
+					TEST_LANE_ID,
+					InboundLaneData {
+						last_confirmed_nonce: 1,
+						relayers: vec![UnrewardedRelayer {
+							relayer: 0,
+							messages: delivered_messages,
+						}]
+						.into_iter()
+						.collect(),
+					},
+				))),
+				UnrewardedRelayersState {
+					unrewarded_relayer_entries: 1,
+					total_messages: max_messages_to_prune,
+					..Default::default()
+				},
+			));
+
+			// when next message is sent, MAX messages are pruned
+			let weight_when_max_messages_are_pruned = send_regular_message();
+			assert_eq!(
+				weight_when_max_messages_are_pruned,
+				when_zero_messages_are_pruned +
+					crate::mock::DbWeight::get().writes(max_messages_to_prune),
+			);
+		});
+	}
+
+	#[test]
+	fn message_accepted_callbacks_are_called() {
+		run_test(|| {
+			send_regular_message();
+			TestOnMessageAccepted::ensure_called(&TEST_LANE_ID, &1);
+		});
+	}
+
+	#[test]
+	#[should_panic]
+	fn message_accepted_panics_in_debug_mode_if_callback_is_wrong() {
+		run_test(|| {
+			TestOnMessageAccepted::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().reads_writes(2, 2),
+			);
+			send_regular_message();
+		});
+	}
+
+	#[test]
+	fn message_accepted_refunds_non_zero_weight() {
+		run_test(|| {
+			TestOnMessageAccepted::set_consumed_weight_per_message(
+				crate::mock::DbWeight::get().writes(1),
+			);
+			let actual_callback_weight = send_regular_message();
+			let pre_dispatch_weight = <TestRuntime as Config>::WeightInfo::send_message_weight(
+				&REGULAR_PAYLOAD,
+				crate::mock::DbWeight::get(),
+			);
+			let prune_weight = crate::mock::DbWeight::get()
+				.writes(<TestRuntime as Config>::MaxMessagesToPruneAtOnce::get());
+
+			assert_eq!(
+				pre_dispatch_weight.saturating_sub(actual_callback_weight),
+				crate::mock::DbWeight::get().reads(1).saturating_add(prune_weight)
+			);
 		});
 	}
 }
diff --git a/polkadot/bridges/modules/messages/src/mock.rs b/polkadot/bridges/modules/messages/src/mock.rs
index 35358b76f26de6c85966ee67985f9607d607cc67..a333c95bb58b812ec0de1c316a8576c7ca474d8c 100644
--- a/polkadot/bridges/modules/messages/src/mock.rs
+++ b/polkadot/bridges/modules/messages/src/mock.rs
@@ -17,21 +17,26 @@
 // From construct_runtime macro
 #![allow(clippy::from_over_into)]
 
-use crate::Config;
+use crate::{instant_payments::cal_relayers_rewards, Config};
 
 use bitvec::prelude::*;
 use bp_messages::{
 	source_chain::{
-		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, RelayersRewards, Sender,
-		TargetHeaderChain,
+		LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed,
+		OnMessageAccepted, Sender, TargetHeaderChain,
 	},
-	target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain},
-	DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData,
-	Parameter as MessagesParameter, UnrewardedRelayer,
+	target_chain::{
+		DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain,
+	},
+	DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce,
+	OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayer,
 };
 use bp_runtime::{messages::MessageDispatchResult, Size};
 use codec::{Decode, Encode};
-use frame_support::{parameter_types, weights::Weight};
+use frame_support::{
+	parameter_types,
+	weights::{RuntimeDbWeight, Weight},
+};
 use scale_info::TypeInfo;
 use sp_core::H256;
 use sp_runtime::{
@@ -39,7 +44,10 @@ use sp_runtime::{
 	traits::{BlakeTwo256, IdentityLookup},
 	FixedU128, Perbill,
 };
-use std::collections::BTreeMap;
+use std::{
+	collections::{BTreeMap, VecDeque},
+	ops::RangeInclusive,
+};
 
 pub type AccountId = u64;
 pub type Balance = u64;
@@ -51,9 +59,11 @@ pub struct TestPayload {
 	pub declared_weight: Weight,
 	/// Message dispatch result.
 	///
-	/// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, but for test
-	/// purposes we'll be making it larger than `declared_weight` sometimes.
+	/// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`,
+	/// but for test purposes we'll be making it larger than `declared_weight` sometimes.
 	pub dispatch_result: MessageDispatchResult,
+	/// Extra bytes that affect payload size.
+	pub extra: Vec<u8>,
 }
 pub type TestMessageFee = u64;
 pub type TestRelayer = u64;
@@ -88,6 +98,7 @@ parameter_types! {
 	pub const MaximumBlockWeight: Weight = 1024;
 	pub const MaximumBlockLength: u32 = 2 * 1024;
 	pub const AvailableBlockRatio: Perbill = Perbill::one();
+	pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 };
 }
 
 impl frame_system::Config for TestRuntime {
@@ -111,7 +122,7 @@ impl frame_system::Config for TestRuntime {
 	type SystemWeightInfo = ();
 	type BlockWeights = ();
 	type BlockLength = ();
-	type DbWeight = ();
+	type DbWeight = DbWeight;
 	type SS58Prefix = ();
 	type OnSetCode = ();
 }
@@ -137,6 +148,7 @@ parameter_types! {
 	pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16;
 	pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 32;
 	pub storage TokenConversionRate: FixedU128 = 1.into();
+  pub const TestBridgedChainId: bp_runtime::ChainId = *b"test";
 }
 
 #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)]
@@ -147,7 +159,8 @@ pub enum TestMessagesParameter {
 impl MessagesParameter for TestMessagesParameter {
 	fn save(&self) {
 		match *self {
-			TestMessagesParameter::TokenConversionRate(conversion_rate) => TokenConversionRate::set(&conversion_rate),
+			TestMessagesParameter::TokenConversionRate(conversion_rate) =>
+				TokenConversionRate::set(&conversion_rate),
 		}
 	}
 }
@@ -172,15 +185,17 @@ impl Config for TestRuntime {
 	type TargetHeaderChain = TestTargetHeaderChain;
 	type LaneMessageVerifier = TestLaneMessageVerifier;
 	type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment;
+	type OnMessageAccepted = TestOnMessageAccepted;
 	type OnDeliveryConfirmed = (TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2);
 
 	type SourceHeaderChain = TestSourceHeaderChain;
 	type MessageDispatch = TestMessageDispatch;
+	type BridgedChainId = TestBridgedChainId;
 }
 
 impl Size for TestPayload {
 	fn size_hint(&self) -> u32 {
-		16
+		16 + self.extra.len() as u32
 	}
 }
 
@@ -227,14 +242,12 @@ impl From<Result<Vec<Message<TestMessageFee>>, ()>> for TestMessagesProof {
 	fn from(result: Result<Vec<Message<TestMessageFee>>, ()>) -> Self {
 		Self {
 			result: result.map(|messages| {
-				let mut messages_by_lane: BTreeMap<LaneId, ProvedLaneMessages<Message<TestMessageFee>>> =
-					BTreeMap::new();
+				let mut messages_by_lane: BTreeMap<
+					LaneId,
+					ProvedLaneMessages<Message<TestMessageFee>>,
+				> = BTreeMap::new();
 				for message in messages {
-					messages_by_lane
-						.entry(message.key.lane_id)
-						.or_default()
-						.messages
-						.push(message);
+					messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message);
 				}
 				messages_by_lane.into_iter().collect()
 			}),
@@ -310,7 +323,8 @@ impl TestMessageDeliveryAndDispatchPayment {
 
 	/// Returns true if given fee has been paid by given submitter.
 	pub fn is_fee_paid(submitter: AccountId, fee: TestMessageFee) -> bool {
-		frame_support::storage::unhashed::get(b":message-fee:") == Some((Sender::Signed(submitter), fee))
+		frame_support::storage::unhashed::get(b":message-fee:") ==
+			Some((Sender::Signed(submitter), fee))
 	}
 
 	/// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is
@@ -321,7 +335,9 @@ impl TestMessageDeliveryAndDispatchPayment {
 	}
 }
 
-impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee> for TestMessageDeliveryAndDispatchPayment {
+impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee>
+	for TestMessageDeliveryAndDispatchPayment
+{
 	type Error = &'static str;
 
 	fn pay_delivery_and_dispatch_fee(
@@ -330,7 +346,7 @@ impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee> for TestMessag
 		_relayer_fund_account: &AccountId,
 	) -> Result<(), Self::Error> {
 		if frame_support::storage::unhashed::get(b":reject-message-fee:") == Some(true) {
-			return Err(TEST_ERROR);
+			return Err(TEST_ERROR)
 		}
 
 		frame_support::storage::unhashed::put(b":message-fee:", &(submitter, fee));
@@ -338,17 +354,51 @@ impl MessageDeliveryAndDispatchPayment<AccountId, TestMessageFee> for TestMessag
 	}
 
 	fn pay_relayers_rewards(
+		lane_id: LaneId,
+		message_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
 		_confirmation_relayer: &AccountId,
-		relayers_rewards: RelayersRewards<AccountId, TestMessageFee>,
+		received_range: &RangeInclusive<MessageNonce>,
 		_relayer_fund_account: &AccountId,
 	) {
-		for (relayer, reward) in relayers_rewards {
+		let relayers_rewards =
+			cal_relayers_rewards::<TestRuntime, ()>(lane_id, message_relayers, received_range);
+		for (relayer, reward) in &relayers_rewards {
 			let key = (b":relayer-reward:", relayer, reward.reward).encode();
 			frame_support::storage::unhashed::put(&key, &true);
 		}
 	}
 }
 
+#[derive(Debug)]
+pub struct TestOnMessageAccepted;
+
+impl TestOnMessageAccepted {
+	/// Verify that the callback has been called when the message is accepted.
+	pub fn ensure_called(lane: &LaneId, message: &MessageNonce) {
+		let key = (b"TestOnMessageAccepted", lane, message).encode();
+		assert_eq!(frame_support::storage::unhashed::get(&key), Some(true));
+	}
+
+	/// Set consumed weight returned by the callback.
+	pub fn set_consumed_weight_per_message(weight: Weight) {
+		frame_support::storage::unhashed::put(b"TestOnMessageAccepted_Weight", &weight);
+	}
+
+	/// Get consumed weight returned by the callback.
+	pub fn get_consumed_weight_per_message() -> Option<Weight> {
+		frame_support::storage::unhashed::get(b"TestOnMessageAccepted_Weight")
+	}
+}
+
+impl OnMessageAccepted for TestOnMessageAccepted {
+	fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight {
+		let key = (b"TestOnMessageAccepted", lane, message).encode();
+		frame_support::storage::unhashed::put(&key, &true);
+		Self::get_consumed_weight_per_message()
+			.unwrap_or_else(|| DbWeight::get().reads_writes(1, 1))
+	}
+}
+
 /// First on-messages-delivered callback.
 #[derive(Debug)]
 pub struct TestOnDeliveryConfirmed1;
@@ -359,16 +409,29 @@ impl TestOnDeliveryConfirmed1 {
 		let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode();
 		assert_eq!(frame_support::storage::unhashed::get(&key), Some(true));
 	}
+
+	/// Set consumed weight returned by the callback.
+	pub fn set_consumed_weight_per_message(weight: Weight) {
+		frame_support::storage::unhashed::put(b"TestOnDeliveryConfirmed1_Weight", &weight);
+	}
+
+	/// Get consumed weight returned by the callback.
+	pub fn get_consumed_weight_per_message() -> Option<Weight> {
+		frame_support::storage::unhashed::get(b"TestOnDeliveryConfirmed1_Weight")
+	}
 }
 
 impl OnDeliveryConfirmed for TestOnDeliveryConfirmed1 {
-	fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) {
+	fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight {
 		let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode();
 		frame_support::storage::unhashed::put(&key, &true);
+		Self::get_consumed_weight_per_message()
+			.unwrap_or_else(|| DbWeight::get().reads_writes(1, 1))
+			.saturating_mul(messages.total_messages())
 	}
 }
 
-/// Seconde on-messages-delivered callback.
+/// Second on-messages-delivered callback.
 #[derive(Debug)]
 pub struct TestOnDeliveryConfirmed2;
 
@@ -381,9 +444,10 @@ impl TestOnDeliveryConfirmed2 {
 }
 
 impl OnDeliveryConfirmed for TestOnDeliveryConfirmed2 {
-	fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) {
+	fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight {
 		let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode();
 		frame_support::storage::unhashed::put(&key, &true);
+		0
 	}
 }
 
@@ -400,10 +464,7 @@ impl SourceHeaderChain<TestMessageFee> for TestSourceHeaderChain {
 		proof: Self::MessagesProof,
 		_messages_count: u32,
 	) -> Result<ProvedMessages<Message<TestMessageFee>>, Self::Error> {
-		proof
-			.result
-			.map(|proof| proof.into_iter().collect())
-			.map_err(|_| TEST_ERROR)
+		proof.result.map(|proof| proof.into_iter().collect()).map_err(|_| TEST_ERROR)
 	}
 }
 
@@ -434,30 +495,17 @@ impl MessageDispatch<AccountId, TestMessageFee> for TestMessageDispatch {
 
 /// Return test lane message with given nonce and payload.
 pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message<TestMessageFee> {
-	Message {
-		key: MessageKey {
-			lane_id: TEST_LANE_ID,
-			nonce,
-		},
-		data: message_data(payload),
-	}
+	Message { key: MessageKey { lane_id: TEST_LANE_ID, nonce }, data: message_data(payload) }
 }
 
 /// Constructs message payload using given arguments and zero unspent weight.
 pub const fn message_payload(id: u64, declared_weight: Weight) -> TestPayload {
-	TestPayload {
-		id,
-		declared_weight,
-		dispatch_result: dispatch_result(0),
-	}
+	TestPayload { id, declared_weight, dispatch_result: dispatch_result(0), extra: Vec::new() }
 }
 
 /// Return message data with valid fee for given payload.
 pub fn message_data(payload: TestPayload) -> MessageData<TestMessageFee> {
-	MessageData {
-		payload: payload.encode(),
-		fee: 1,
-	}
+	MessageData { payload: payload.encode(), fee: 1 }
 }
 
 /// Returns message dispatch result with given unspent weight.
@@ -491,14 +539,10 @@ pub fn unrewarded_relayer(
 
 /// Run pallet test.
 pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
-	let mut t = frame_system::GenesisConfig::default()
-		.build_storage::<TestRuntime>()
+	let mut t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
+	pallet_balances::GenesisConfig::<TestRuntime> { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] }
+		.assimilate_storage(&mut t)
 		.unwrap();
-	pallet_balances::GenesisConfig::<TestRuntime> {
-		balances: vec![(ENDOWED_ACCOUNT, 1_000_000)],
-	}
-	.assimilate_storage(&mut t)
-	.unwrap();
 	let mut ext = sp_io::TestExternalities::new(t);
 	ext.execute_with(test)
 }
diff --git a/polkadot/bridges/modules/messages/src/outbound_lane.rs b/polkadot/bridges/modules/messages/src/outbound_lane.rs
index 44061d984e1d08f3dbcfcb7e179554878fb01e97..c05437596db82af9f4ca4d3bacae457aa32c58dc 100644
--- a/polkadot/bridges/modules/messages/src/outbound_lane.rs
+++ b/polkadot/bridges/modules/messages/src/outbound_lane.rs
@@ -18,7 +18,8 @@
 
 use bitvec::prelude::*;
 use bp_messages::{
-	DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer,
+	DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData,
+	UnrewardedRelayer,
 };
 use frame_support::RuntimeDebug;
 use sp_std::collections::vec_deque::VecDeque;
@@ -49,7 +50,7 @@ pub enum ReceivalConfirmationResult {
 	/// New messages have been confirmed by the confirmation transaction.
 	ConfirmedMessages(DeliveredMessages),
 	/// Confirmation transaction brings no new confirmation. This may be a result of relayer
-	/// error or several relayers runnng.
+	/// error or several relayers running.
 	NoNewConfirmations,
 	/// Bridged chain is trying to confirm more messages than we have generated. May be a result
 	/// of invalid bridged chain storage.
@@ -57,12 +58,14 @@ pub enum ReceivalConfirmationResult {
 	/// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged
 	/// chain storage.
 	EmptyUnrewardedRelayerEntry,
-	/// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid bridged
-	/// chain storage.
+	/// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid
+	/// bridged chain storage.
 	NonConsecutiveUnrewardedRelayerEntries,
-	/// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May be
-	/// a result of invalid bridged chain storage.
+	/// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May
+	/// be a result of invalid bridged chain storage.
 	InvalidNumberOfDispatchResults,
+	/// The chain has more messages that need to be confirmed than there are in the proof.
+	TryingToConfirmMoreMessagesThanExpected(MessageNonce),
 }
 
 /// Outbound messages lane.
@@ -98,30 +101,44 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
 	/// Confirm messages delivery.
 	pub fn confirm_delivery<RelayerId>(
 		&mut self,
-		latest_received_nonce: MessageNonce,
+		max_allowed_messages: MessageNonce,
+		latest_delivered_nonce: MessageNonce,
 		relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
 	) -> ReceivalConfirmationResult {
 		let mut data = self.storage.data();
-		if latest_received_nonce <= data.latest_received_nonce {
-			return ReceivalConfirmationResult::NoNewConfirmations;
+		if latest_delivered_nonce <= data.latest_received_nonce {
+			return ReceivalConfirmationResult::NoNewConfirmations
+		}
+		if latest_delivered_nonce > data.latest_generated_nonce {
+			return ReceivalConfirmationResult::FailedToConfirmFutureMessages
 		}
-		if latest_received_nonce > data.latest_generated_nonce {
-			return ReceivalConfirmationResult::FailedToConfirmFutureMessages;
+		if latest_delivered_nonce - data.latest_received_nonce > max_allowed_messages {
+			// We assume that the relayer has declared the correct number of messages that the
+			// proof contains (it is checked outside of this function). But it may happen (though
+			// only if this/bridged chain storage is corrupted) that the actual number of confirmed
+			// messages is larger than declared. This would mean that the 'reward loop' will take
+			// more time than the weight formula accounts for, so we can't allow that.
+			return ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(
+				latest_delivered_nonce - data.latest_received_nonce,
+			)
 		}
 
-		let dispatch_results =
-			match extract_dispatch_results(data.latest_received_nonce, latest_received_nonce, relayers) {
-				Ok(dispatch_results) => dispatch_results,
-				Err(extract_error) => return extract_error,
-			};
+		let dispatch_results = match extract_dispatch_results(
+			data.latest_received_nonce,
+			latest_delivered_nonce,
+			relayers,
+		) {
+			Ok(dispatch_results) => dispatch_results,
+			Err(extract_error) => return extract_error,
+		};
 
 		let prev_latest_received_nonce = data.latest_received_nonce;
-		data.latest_received_nonce = latest_received_nonce;
+		data.latest_received_nonce = latest_delivered_nonce;
 		self.storage.set_data(data);
 
 		ReceivalConfirmationResult::ConfirmedMessages(DeliveredMessages {
 			begin: prev_latest_received_nonce + 1,
-			end: latest_received_nonce,
+			end: latest_delivered_nonce,
 			dispatch_results,
 		})
 	}
@@ -133,7 +150,9 @@ impl<S: OutboundLaneStorage> OutboundLane<S> {
 		let mut pruned_messages = 0;
 		let mut anything_changed = false;
 		let mut data = self.storage.data();
-		while pruned_messages < max_messages_to_prune && data.oldest_unpruned_nonce <= data.latest_received_nonce {
+		while pruned_messages < max_messages_to_prune &&
+			data.oldest_unpruned_nonce <= data.latest_received_nonce
+		{
 			self.storage.remove_message(&data.oldest_unpruned_nonce);
 
 			anything_changed = true;
@@ -158,9 +177,10 @@ fn extract_dispatch_results<RelayerId>(
 	latest_received_nonce: MessageNonce,
 	relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
 ) -> Result<DispatchResultsBitVec, ReceivalConfirmationResult> {
-	// the only caller of this functions checks that the prev_latest_received_nonce..=latest_received_nonce
-	// is valid, so we're ready to accept messages in this range
-	// => with_capacity call must succeed here or we'll be unable to receive confirmations at all
+	// the only caller of this function checks that the
+	// prev_latest_received_nonce..=latest_received_nonce range is valid, so we're ready to accept
+	// messages in this range => the with_capacity call must succeed here or we'll be unable to
+	// receive confirmations at all
 	let mut received_dispatch_result =
 		BitVec::with_capacity((latest_received_nonce - prev_latest_received_nonce + 1) as _);
 	let mut last_entry_end: Option<MessageNonce> = None;
@@ -168,43 +188,48 @@ fn extract_dispatch_results<RelayerId>(
 		// unrewarded relayer entry must have at least 1 unconfirmed message
 		// (guaranteed by the `InboundLane::receive_message()`)
 		if entry.messages.end < entry.messages.begin {
-			return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry);
+			return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry)
 		}
 		// every entry must confirm range of messages that follows previous entry range
 		// (guaranteed by the `InboundLane::receive_message()`)
 		if let Some(last_entry_end) = last_entry_end {
 			let expected_entry_begin = last_entry_end.checked_add(1);
 			if expected_entry_begin != Some(entry.messages.begin) {
-				return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries);
+				return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries)
 			}
 		}
 		last_entry_end = Some(entry.messages.end);
 		// entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()`
 		// (guaranteed by the `InboundLane::receive_message()`)
 		if entry.messages.end > latest_received_nonce {
-			// technically this will be detected in the next loop iteration as `InvalidNumberOfDispatchResults`
-			// but to guarantee safety of loop operations below this is detected now
-			return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages);
+			// technically this will be detected in the next loop iteration as
+			// `InvalidNumberOfDispatchResults` but to guarantee safety of loop operations below
+			// this is detected now
+			return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages)
 		}
 		// entry must have single dispatch result for every message
 		// (guaranteed by the `InboundLane::receive_message()`)
-		if entry.messages.dispatch_results.len() as MessageNonce != entry.messages.end - entry.messages.begin + 1 {
-			return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults);
+		if entry.messages.dispatch_results.len() as MessageNonce !=
+			entry.messages.end - entry.messages.begin + 1
+		{
+			return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults)
 		}
 
 		// now we know that the entry is valid
 		// => let's check if it brings new confirmations
-		let new_messages_begin = sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1);
+		let new_messages_begin =
+			sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1);
 		let new_messages_end = sp_std::cmp::min(entry.messages.end, latest_received_nonce);
 		let new_messages_range = new_messages_begin..=new_messages_end;
 		if new_messages_range.is_empty() {
-			continue;
+			continue
 		}
 
 		// now we know that entry brings new confirmations
 		// => let's extract dispatch results
 		received_dispatch_result.extend_from_bitslice(
-			&entry.messages.dispatch_results[(new_messages_begin - entry.messages.begin) as usize..],
+			&entry.messages.dispatch_results
+				[(new_messages_begin - entry.messages.begin) as usize..],
 		);
 	}
 
@@ -215,12 +240,17 @@ fn extract_dispatch_results<RelayerId>(
 mod tests {
 	use super::*;
 	use crate::{
-		mock::{message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID},
+		mock::{
+			message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD,
+			TEST_LANE_ID,
+		},
 		outbound_lane,
 	};
 	use sp_std::ops::RangeInclusive;
 
-	fn unrewarded_relayers(nonces: RangeInclusive<MessageNonce>) -> VecDeque<UnrewardedRelayer<TestRelayer>> {
+	fn unrewarded_relayers(
+		nonces: RangeInclusive<MessageNonce>,
+	) -> VecDeque<UnrewardedRelayer<TestRelayer>> {
 		vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)]
 			.into_iter()
 			.collect()
@@ -245,7 +275,7 @@ mod tests {
 			lane.send_message(message_data(REGULAR_PAYLOAD));
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
 			assert_eq!(lane.storage.data().latest_received_nonce, 0);
-			let result = lane.confirm_delivery(latest_received_nonce, relayers);
+			let result = lane.confirm_delivery(3, latest_received_nonce, relayers);
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
 			assert_eq!(lane.storage.data().latest_received_nonce, 0);
 			result
@@ -273,7 +303,7 @@ mod tests {
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
 			assert_eq!(lane.storage.data().latest_received_nonce, 0);
 			assert_eq!(
-				lane.confirm_delivery(3, &unrewarded_relayers(1..=3)),
+				lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
 				ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)),
 			);
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
@@ -291,18 +321,18 @@ mod tests {
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
 			assert_eq!(lane.storage.data().latest_received_nonce, 0);
 			assert_eq!(
-				lane.confirm_delivery(3, &unrewarded_relayers(1..=3)),
+				lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
 				ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)),
 			);
 			assert_eq!(
-				lane.confirm_delivery(3, &unrewarded_relayers(1..=3)),
+				lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
 				ReceivalConfirmationResult::NoNewConfirmations,
 			);
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
 			assert_eq!(lane.storage.data().latest_received_nonce, 3);
 
 			assert_eq!(
-				lane.confirm_delivery(2, &unrewarded_relayers(1..=1)),
+				lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)),
 				ReceivalConfirmationResult::NoNewConfirmations,
 			);
 			assert_eq!(lane.storage.data().latest_generated_nonce, 3);
@@ -393,18 +423,40 @@ mod tests {
 			assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1);
 			// after confirmation, some messages are received
 			assert_eq!(
-				lane.confirm_delivery(2, &unrewarded_relayers(1..=2)),
+				lane.confirm_delivery(2, 2, &unrewarded_relayers(1..=2)),
 				ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=2)),
 			);
 			assert_eq!(lane.prune_messages(100), 2);
 			assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3);
 			// after last message is confirmed, everything is pruned
 			assert_eq!(
-				lane.confirm_delivery(3, &unrewarded_relayers(3..=3)),
+				lane.confirm_delivery(1, 3, &unrewarded_relayers(3..=3)),
 				ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(3..=3)),
 			);
 			assert_eq!(lane.prune_messages(100), 1);
 			assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4);
 		});
 	}
+
+	#[test]
+	fn confirm_delivery_detects_when_more_than_expected_messages_are_confirmed() {
+		run_test(|| {
+			let mut lane = outbound_lane::<TestRuntime, _>(TEST_LANE_ID);
+			lane.send_message(message_data(REGULAR_PAYLOAD));
+			lane.send_message(message_data(REGULAR_PAYLOAD));
+			lane.send_message(message_data(REGULAR_PAYLOAD));
+			assert_eq!(
+				lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)),
+				ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(3),
+			);
+			assert_eq!(
+				lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)),
+				ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(3),
+			);
+			assert_eq!(
+				lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)),
+				ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)),
+			);
+		});
+	}
 }
diff --git a/polkadot/bridges/modules/messages/src/weights.rs b/polkadot/bridges/modules/messages/src/weights.rs
index 9b65c8217ad64c23ccc4e81c4e4aa31158780beb..9dce11168fbbc2cdaa347bac2bae61989cac313f 100644
--- a/polkadot/bridges/modules/messages/src/weights.rs
+++ b/polkadot/bridges/modules/messages/src/weights.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-//! Autogenerated weights for pallet_bridge_messages
+//! Autogenerated weights for `pallet_bridge_messages`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0
 //! DATE: 2021-06-18, STEPS: [50, ], REPEAT: 20
@@ -46,12 +46,13 @@ use frame_support::{
 };
 use sp_std::marker::PhantomData;
 
-/// Weight functions needed for pallet_bridge_messages.
+/// Weight functions needed for `pallet_bridge_messages`.
 pub trait WeightInfo {
 	fn send_minimal_message_worst_case() -> Weight;
 	fn send_1_kb_message_worst_case() -> Weight;
 	fn send_16_kb_message_worst_case() -> Weight;
-	fn increase_message_fee() -> Weight;
+	fn maximal_increase_message_fee() -> Weight;
+	fn increase_message_fee(i: u32) -> Weight;
 	fn receive_single_message_proof() -> Weight;
 	fn receive_two_messages_proof() -> Weight;
 	fn receive_single_message_proof_with_outbound_lane_state() -> Weight;
@@ -70,7 +71,7 @@ pub trait WeightInfo {
 	fn receive_delivery_proof_for_multiple_messages_by_multiple_relayers(i: u32) -> Weight;
 }
 
-/// Weights for pallet_bridge_messages using the Rialto node and recommended hardware.
+/// Weights for `pallet_bridge_messages` using the Rialto node and recommended hardware.
 pub struct RialtoWeight<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
 	fn send_minimal_message_worst_case() -> Weight {
@@ -88,8 +89,14 @@ impl<T: frame_system::Config> WeightInfo for RialtoWeight<T> {
 			.saturating_add(T::DbWeight::get().reads(5 as Weight))
 			.saturating_add(T::DbWeight::get().writes(12 as Weight))
 	}
-	fn increase_message_fee() -> Weight {
-		(6_709_925_000 as Weight)
+	fn maximal_increase_message_fee() -> Weight {
+		(6_781_470_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(5 as Weight))
+			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+	}
+	fn increase_message_fee(i: u32) -> Weight {
+		(114_963_000 as Weight)
+			.saturating_add((6_000 as Weight).saturating_mul(i as Weight))
 			.saturating_add(T::DbWeight::get().reads(5 as Weight))
 			.saturating_add(T::DbWeight::get().writes(3 as Weight))
 	}
@@ -202,8 +209,14 @@ impl WeightInfo for () {
 			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
 			.saturating_add(RocksDbWeight::get().writes(12 as Weight))
 	}
-	fn increase_message_fee() -> Weight {
-		(6_709_925_000 as Weight)
+	fn maximal_increase_message_fee() -> Weight {
+		(6_781_470_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+	}
+	fn increase_message_fee(i: u32) -> Weight {
+		(114_963_000 as Weight)
+			.saturating_add((6_000 as Weight).saturating_mul(i as Weight))
 			.saturating_add(RocksDbWeight::get().reads(5 as Weight))
 			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
 	}
diff --git a/polkadot/bridges/modules/messages/src/weights_ext.rs b/polkadot/bridges/modules/messages/src/weights_ext.rs
index be440174b4b9bcb7d89e7ac87a04c611aebce9fa..fef09c6cebe577a1e92a5a36c5dda9549a135a28 100644
--- a/polkadot/bridges/modules/messages/src/weights_ext.rs
+++ b/polkadot/bridges/modules/messages/src/weights_ext.rs
@@ -20,21 +20,27 @@ use crate::weights::WeightInfo;
 
 use bp_messages::{MessageNonce, UnrewardedRelayersState};
 use bp_runtime::{PreComputedSize, Size};
-use frame_support::weights::Weight;
+use frame_support::weights::{RuntimeDbWeight, Weight};
 
 /// Size of the message being delivered in benchmarks.
 pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128;
 
-/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of calls
-/// we're checking here would fit 1KB.
+/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of
+/// calls we're checking here would fit 1KB.
 const SIGNED_EXTENSIONS_SIZE: u32 = 1024;
 
+/// Number of extra bytes (excluding the size of the storage value itself) of a storage proof
+/// built at the Rialto chain. This mostly depends on the number of entries (and their density)
+/// in the storage trie. Some reserve is included to account for future chain growth.
+pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;
+
 /// Ensure that weights from `WeightInfoExt` implementation are looking correct.
 pub fn ensure_weights_are_correct<W: WeightInfoExt>(
 	expected_default_message_delivery_tx_weight: Weight,
 	expected_additional_byte_delivery_weight: Weight,
 	expected_messages_delivery_confirmation_tx_weight: Weight,
 	expected_pay_inbound_dispatch_fee_weight: Weight,
+	db_weight: RuntimeDbWeight,
 ) {
 	// verify `send_message` weight components
 	assert_ne!(W::send_message_overhead(), 0);
@@ -48,12 +54,15 @@ pub fn ensure_weights_are_correct<W: WeightInfoExt>(
 
 	// verify that the hardcoded value covers `receive_messages_proof` weight
 	let actual_single_regular_message_delivery_tx_weight = W::receive_messages_proof_weight(
-		&PreComputedSize((EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize),
+		&PreComputedSize(
+			(EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize,
+		),
 		1,
 		0,
 	);
 	assert!(
-		actual_single_regular_message_delivery_tx_weight <= expected_default_message_delivery_tx_weight,
+		actual_single_regular_message_delivery_tx_weight <=
+			expected_default_message_delivery_tx_weight,
 		"Default message delivery transaction weight {} is larger than expected weight {}",
 		actual_single_regular_message_delivery_tx_weight,
 		expected_default_message_delivery_tx_weight,
@@ -82,9 +91,11 @@ pub fn ensure_weights_are_correct<W: WeightInfoExt>(
 			total_messages: 1,
 			..Default::default()
 		},
+		db_weight,
 	);
 	assert!(
-		actual_messages_delivery_confirmation_tx_weight <= expected_messages_delivery_confirmation_tx_weight,
+		actual_messages_delivery_confirmation_tx_weight <=
+			expected_messages_delivery_confirmation_tx_weight,
 		"Messages delivery confirmation transaction weight {} is larger than expected weight {}",
 		actual_messages_delivery_confirmation_tx_weight,
 		expected_messages_delivery_confirmation_tx_weight,
@@ -108,7 +119,8 @@ pub fn ensure_able_to_receive_message<W: WeightInfoExt>(
 	max_incoming_message_dispatch_weight: Weight,
 ) {
 	// verify that we're able to receive proof of maximal-size message
-	let max_delivery_transaction_size = max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE);
+	let max_delivery_transaction_size =
+		max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE);
 	assert!(
 		max_delivery_transaction_size <= max_extrinsic_size,
 		"Size of maximal message delivery transaction {} + {} is larger than maximal possible transaction size {}",
@@ -119,7 +131,9 @@ pub fn ensure_able_to_receive_message<W: WeightInfoExt>(
 
 	// verify that we're able to receive proof of maximal-size message with maximal dispatch weight
 	let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight(
-		&PreComputedSize((max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize),
+		&PreComputedSize(
+			(max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize,
+		),
 		1,
 		max_incoming_message_dispatch_weight,
 	);
@@ -138,6 +152,7 @@ pub fn ensure_able_to_receive_confirmation<W: WeightInfoExt>(
 	max_inbound_lane_data_proof_size_from_peer_chain: u32,
 	max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce,
 	max_unconfirmed_messages_at_inbound_lane: MessageNonce,
+	db_weight: RuntimeDbWeight,
 ) {
 	// verify that we're able to receive confirmation of maximal-size
 	let max_confirmation_transaction_size =
@@ -150,7 +165,8 @@ pub fn ensure_able_to_receive_confirmation<W: WeightInfoExt>(
 		max_extrinsic_size,
 	);
 
-	// verify that we're able to reward maximal number of relayers that have delivered maximal number of messages
+	// verify that we're able to reward maximal number of relayers that have delivered maximal
+	// number of messages
 	let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight(
 		&PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize),
 		&UnrewardedRelayersState {
@@ -158,6 +174,7 @@ pub fn ensure_able_to_receive_confirmation<W: WeightInfoExt>(
 			total_messages: max_unconfirmed_messages_at_inbound_lane,
 			..Default::default()
 		},
+		db_weight,
 	);
 	assert!(
 		max_confirmation_transaction_dispatch_weight <= max_extrinsic_weight,
@@ -180,18 +197,26 @@ pub trait WeightInfoExt: WeightInfo {
 	// Functions that are directly mapped to extrinsics weights.
 
 	/// Weight of message send extrinsic.
-	fn send_message_weight(message: &impl Size) -> Weight {
+	fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight {
 		let transaction_overhead = Self::send_message_overhead();
 		let message_size_overhead = Self::send_message_size_overhead(message.size_hint());
+		let call_back_overhead = Self::single_message_callback_overhead(db_weight);
 
-		transaction_overhead.saturating_add(message_size_overhead)
+		transaction_overhead
+			.saturating_add(message_size_overhead)
+			.saturating_add(call_back_overhead)
 	}
 
 	/// Weight of message delivery extrinsic.
-	fn receive_messages_proof_weight(proof: &impl Size, messages_count: u32, dispatch_weight: Weight) -> Weight {
+	fn receive_messages_proof_weight(
+		proof: &impl Size,
+		messages_count: u32,
+		dispatch_weight: Weight,
+	) -> Weight {
 		// basic components of extrinsic weight
 		let transaction_overhead = Self::receive_messages_proof_overhead();
-		let outbound_state_delivery_weight = Self::receive_messages_proof_outbound_lane_state_overhead();
+		let outbound_state_delivery_weight =
+			Self::receive_messages_proof_outbound_lane_state_overhead();
 		let messages_delivery_weight =
 			Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count));
 		let messages_dispatch_weight = dispatch_weight;
@@ -201,8 +226,9 @@ pub trait WeightInfoExt: WeightInfo {
 			.saturating_mul(messages_count.saturating_sub(1))
 			.saturating_add(Self::expected_extra_storage_proof_size());
 		let actual_proof_size = proof.size_hint();
-		let proof_size_overhead =
-			Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size));
+		let proof_size_overhead = Self::storage_proof_size_overhead(
+			actual_proof_size.saturating_sub(expected_proof_size),
+		);
 
 		transaction_overhead
 			.saturating_add(outbound_state_delivery_weight)
@@ -212,23 +238,37 @@ pub trait WeightInfoExt: WeightInfo {
 	}
 
 	/// Weight of confirmation delivery extrinsic.
-	fn receive_messages_delivery_proof_weight(proof: &impl Size, relayers_state: &UnrewardedRelayersState) -> Weight {
+	fn receive_messages_delivery_proof_weight(
+		proof: &impl Size,
+		relayers_state: &UnrewardedRelayersState,
+		db_weight: RuntimeDbWeight,
+	) -> Weight {
 		// basic components of extrinsic weight
 		let transaction_overhead = Self::receive_messages_delivery_proof_overhead();
-		let messages_overhead = Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages);
-		let relayers_overhead =
-			Self::receive_messages_delivery_proof_relayers_overhead(relayers_state.unrewarded_relayer_entries);
+		let messages_overhead =
+			Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages);
+		let relayers_overhead = Self::receive_messages_delivery_proof_relayers_overhead(
+			relayers_state.unrewarded_relayer_entries,
+		);
 
 		// proof size overhead weight
 		let expected_proof_size = Self::expected_extra_storage_proof_size();
 		let actual_proof_size = proof.size_hint();
-		let proof_size_overhead =
-			Self::storage_proof_size_overhead(actual_proof_size.saturating_sub(expected_proof_size));
+		let proof_size_overhead = Self::storage_proof_size_overhead(
+			actual_proof_size.saturating_sub(expected_proof_size),
+		);
+
+		// and cost of calling `OnDeliveryConfirmed::on_messages_delivered()` for every confirmed
+		// message
+		let callback_overhead = relayers_state
+			.total_messages
+			.saturating_mul(Self::single_message_callback_overhead(db_weight));
 
 		transaction_overhead
 			.saturating_add(messages_overhead)
 			.saturating_add(relayers_overhead)
 			.saturating_add(proof_size_overhead)
+			.saturating_add(callback_overhead)
 	}
 
 	// Functions that are used by extrinsics weights formulas.
@@ -238,22 +278,26 @@ pub trait WeightInfoExt: WeightInfo {
 		Self::send_minimal_message_worst_case()
 	}
 
-	/// Returns weight that needs to be accounted when message of given size is sent (`send_message`).
+	/// Returns weight that needs to be accounted when a message of a given size is sent
+	/// (`send_message`).
 	fn send_message_size_overhead(message_size: u32) -> Weight {
 		let message_size_in_kb = (1024u64 + message_size as u64) / 1024;
-		let single_kb_weight = (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15;
+		let single_kb_weight =
+			(Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15;
 		message_size_in_kb * single_kb_weight
 	}
 
 	/// Returns weight overhead of message delivery transaction (`receive_messages_proof`).
 	fn receive_messages_proof_overhead() -> Weight {
-		let weight_of_two_messages_and_two_tx_overheads = Self::receive_single_message_proof().saturating_mul(2);
+		let weight_of_two_messages_and_two_tx_overheads =
+			Self::receive_single_message_proof().saturating_mul(2);
 		let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof();
-		weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
+		weight_of_two_messages_and_two_tx_overheads
+			.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
 	}
 
-	/// Returns weight that needs to be accounted when receiving given number of messages with message
-	/// delivery transaction (`receive_messages_proof`).
+	/// Returns weight that needs to be accounted when receiving a given number of messages with
+	/// a message delivery transaction (`receive_messages_proof`).
 	fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight {
 		let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof();
 		let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof();
@@ -262,37 +306,42 @@ pub trait WeightInfoExt: WeightInfo {
 			.saturating_mul(messages as Weight)
 	}
 
-	/// Returns weight that needs to be accounted when message delivery transaction (`receive_messages_proof`)
-	/// is carrying outbound lane state proof.
+	/// Returns weight that needs to be accounted when message delivery transaction
+	/// (`receive_messages_proof`) is carrying outbound lane state proof.
 	fn receive_messages_proof_outbound_lane_state_overhead() -> Weight {
-		let weight_of_single_message_and_lane_state = Self::receive_single_message_proof_with_outbound_lane_state();
+		let weight_of_single_message_and_lane_state =
+			Self::receive_single_message_proof_with_outbound_lane_state();
 		let weight_of_single_message = Self::receive_single_message_proof();
 		weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message)
 	}
 
-	/// Returns weight overhead of delivery confirmation transaction (`receive_messages_delivery_proof`).
+	/// Returns weight overhead of delivery confirmation transaction
+	/// (`receive_messages_delivery_proof`).
 	fn receive_messages_delivery_proof_overhead() -> Weight {
 		let weight_of_two_messages_and_two_tx_overheads =
 			Self::receive_delivery_proof_for_single_message().saturating_mul(2);
 		let weight_of_two_messages_and_single_tx_overhead =
 			Self::receive_delivery_proof_for_two_messages_by_single_relayer();
-		weight_of_two_messages_and_two_tx_overheads.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
+		weight_of_two_messages_and_two_tx_overheads
+			.saturating_sub(weight_of_two_messages_and_single_tx_overhead)
 	}
 
-	/// Returns weight that needs to be accounted when receiving confirmations for given number of
+	/// Returns weight that needs to be accounted when receiving confirmations for a given number of
 	/// messages with delivery confirmation transaction (`receive_messages_delivery_proof`).
 	fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight {
-		let weight_of_two_messages = Self::receive_delivery_proof_for_two_messages_by_single_relayer();
+		let weight_of_two_messages =
+			Self::receive_delivery_proof_for_two_messages_by_single_relayer();
 		let weight_of_single_message = Self::receive_delivery_proof_for_single_message();
 		weight_of_two_messages
 			.saturating_sub(weight_of_single_message)
 			.saturating_mul(messages as Weight)
 	}
 
-	/// Returns weight that needs to be accounted when receiving confirmations for given number of
+	/// Returns weight that needs to be accounted when receiving confirmations for a given number of
 	/// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`).
 	fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight {
-		let weight_of_two_messages_by_two_relayers = Self::receive_delivery_proof_for_two_messages_by_two_relayers();
+		let weight_of_two_messages_by_two_relayers =
+			Self::receive_delivery_proof_for_two_messages_by_two_relayers();
 		let weight_of_two_messages_by_single_relayer =
 			Self::receive_delivery_proof_for_two_messages_by_single_relayer();
 		weight_of_two_messages_by_two_relayers
@@ -300,8 +349,8 @@ pub trait WeightInfoExt: WeightInfo {
 			.saturating_mul(relayers as Weight)
 	}
 
-	/// Returns weight that needs to be accounted when storage proof of given size is recieved (either in
-	/// `receive_messages_proof` or `receive_messages_delivery_proof`).
+	/// Returns weight that needs to be accounted when storage proof of given size is received
+	/// (either in `receive_messages_proof` or `receive_messages_delivery_proof`).
 	///
 	/// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. proof
 	/// size depends on messages count or number of entries in the unrewarded relayers set. So this
@@ -310,27 +359,39 @@ pub trait WeightInfoExt: WeightInfo {
 	/// is less than that cost).
 	fn storage_proof_size_overhead(proof_size: u32) -> Weight {
 		let proof_size_in_bytes = proof_size as Weight;
-		let byte_weight =
-			(Self::receive_single_message_proof_16_kb() - Self::receive_single_message_proof_1_kb()) / (15 * 1024);
+		let byte_weight = (Self::receive_single_message_proof_16_kb() -
+			Self::receive_single_message_proof_1_kb()) /
+			(15 * 1024);
 		proof_size_in_bytes * byte_weight
 	}
 
 	/// Returns weight of the pay-dispatch-fee operation for inbound messages.
 	///
-	/// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain option.
+	/// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain
+	/// option.
 	fn pay_inbound_dispatch_fee_overhead() -> Weight {
-		Self::receive_single_message_proof().saturating_sub(Self::receive_single_prepaid_message_proof())
+		Self::receive_single_message_proof()
+			.saturating_sub(Self::receive_single_prepaid_message_proof())
+	}
+
+	/// Returns pre-dispatch weight of single callback call.
+	///
+	/// When benchmarking the weight please take into consideration both the `OnMessageAccepted` and
+	/// `OnDeliveryConfirmed` callbacks. The method should return the greater of the two, because
+	/// it's used to estimate the weight in both contexts.
+	fn single_message_callback_overhead(db_weight: RuntimeDbWeight) -> Weight {
+		db_weight.reads_writes(1, 1)
 	}
 }
 
 impl WeightInfoExt for () {
 	fn expected_extra_storage_proof_size() -> u32 {
-		bp_rialto::EXTRA_STORAGE_PROOF_SIZE
+		EXTRA_STORAGE_PROOF_SIZE
 	}
 }
 
 impl<T: frame_system::Config> WeightInfoExt for crate::weights::RialtoWeight<T> {
 	fn expected_extra_storage_proof_size() -> u32 {
-		bp_rialto::EXTRA_STORAGE_PROOF_SIZE
+		EXTRA_STORAGE_PROOF_SIZE
 	}
 }
diff --git a/polkadot/bridges/modules/shift-session-manager/Cargo.toml b/polkadot/bridges/modules/shift-session-manager/Cargo.toml
index 6dac97ddde601eff0ddef5cb64dde0bae9ed5b17..9e3e15fddf897365bcb5c19b4709f01a52b9934f 100644
--- a/polkadot/bridges/modules/shift-session-manager/Cargo.toml
+++ b/polkadot/bridges/modules/shift-session-manager/Cargo.toml
@@ -7,20 +7,20 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
-serde = "1.0"
 
 [features]
 default = ["std"]
@@ -29,6 +29,7 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 	"pallet-session/std",
+	"scale-info/std",
 	"sp-staking/std",
 	"sp-std/std",
 ]
diff --git a/polkadot/bridges/modules/shift-session-manager/src/lib.rs b/polkadot/bridges/modules/shift-session-manager/src/lib.rs
index 3635e6223d7f7afd73a44ac6e64af31d1eaac4f8..0278580981375ae692e71f3336695ab3dc609bac 100644
--- a/polkadot/bridges/modules/shift-session-manager/src/lib.rs
+++ b/polkadot/bridges/modules/shift-session-manager/src/lib.rs
@@ -19,22 +19,33 @@
 
 #![cfg_attr(not(feature = "std"), no_std)]
 
-use frame_support::{decl_module, decl_storage};
 use sp_std::prelude::*;
 
-/// The module configuration trait.
-pub trait Config: pallet_session::Config {}
+pub use pallet::*;
 
-decl_module! {
-	/// Shift session manager pallet.
-	pub struct Module<T: Config> for enum Call where origin: T::Origin {}
-}
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
 
-decl_storage! {
-	trait Store for Pallet<T: Config> as ShiftSessionManager {
-		/// Validators of first two sessions.
-		InitialValidators: Option<Vec<T::ValidatorId>>;
-	}
+	#[pallet::config]
+	#[pallet::disable_frame_system_supertrait_check]
+	pub trait Config: pallet_session::Config {}
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T>(PhantomData<T>);
+
+	#[pallet::hooks]
+	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}
+
+	#[pallet::call]
+	impl<T: Config> Pallet<T> {}
+
+	/// Validators of first two sessions.
+	#[pallet::storage]
+	pub(super) type InitialValidators<T: Config> = StorageValue<_, Vec<T::ValidatorId>>;
 }
 
 impl<T: Config> pallet_session::SessionManager<T::ValidatorId> for Pallet<T> {
@@ -43,7 +54,7 @@ impl<T: Config> pallet_session::SessionManager<T::ValidatorId> for Pallet<T> {
 	fn new_session(session_index: sp_staking::SessionIndex) -> Option<Vec<T::ValidatorId>> {
 		// we don't want to add even more fields to genesis config => just return None
 		if session_index == 0 || session_index == 1 {
-			return None;
+			return None
 		}
 
 		// the idea that on first call (i.e. when session 1 ends) we're reading current
@@ -90,13 +101,18 @@ mod tests {
 	#![allow(clippy::from_over_into)]
 
 	use super::*;
-	use frame_support::sp_io::TestExternalities;
-	use frame_support::sp_runtime::{
-		testing::{Header, UintAuthorityId},
-		traits::{BlakeTwo256, ConvertInto, IdentityLookup},
-		Perbill, RuntimeAppPublic,
+	use frame_support::{
+		parameter_types,
+		sp_io::TestExternalities,
+		sp_runtime::{
+			testing::{Header, UintAuthorityId},
+			traits::{BlakeTwo256, ConvertInto, IdentityLookup},
+			Perbill, RuntimeAppPublic,
+		},
+		traits::GenesisBuild,
+		weights::Weight,
+		BasicExternalities,
 	};
-	use frame_support::{parameter_types, weights::Weight, BasicExternalities};
 	use sp_core::H256;
 
 	type AccountId = u64;
@@ -171,17 +187,21 @@ mod tests {
 	impl pallet_session::SessionHandler<AccountId> for TestSessionHandler {
 		const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID];
 
-		fn on_genesis_session<Ks: sp_runtime::traits::OpaqueKeys>(_validators: &[(AccountId, Ks)]) {}
+		fn on_genesis_session<Ks: sp_runtime::traits::OpaqueKeys>(_validators: &[(AccountId, Ks)]) {
+		}
 
-		fn on_new_session<Ks: sp_runtime::traits::OpaqueKeys>(_: bool, _: &[(AccountId, Ks)], _: &[(AccountId, Ks)]) {}
+		fn on_new_session<Ks: sp_runtime::traits::OpaqueKeys>(
+			_: bool,
+			_: &[(AccountId, Ks)],
+			_: &[(AccountId, Ks)],
+		) {
+		}
 
 		fn on_disabled(_: u32) {}
 	}
 
 	fn new_test_ext() -> TestExternalities {
-		let mut t = frame_system::GenesisConfig::default()
-			.build_storage::<TestRuntime>()
-			.unwrap();
+		let mut t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
 
 		let keys = vec![
 			(1, 1, UintAuthorityId(1)),
diff --git a/polkadot/bridges/modules/token-swap/Cargo.toml b/polkadot/bridges/modules/token-swap/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..a6103f688c424f07017960bd241426c4ba63183c
--- /dev/null
+++ b/polkadot/bridges/modules/token-swap/Cargo.toml
@@ -0,0 +1,59 @@
+[package]
+name = "pallet-bridge-token-swap"
+description = "A Substrate pallet that allows parties on different chains (bridged using messages pallet) to swap their tokens"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+log = { version = "0.4.14", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0", optional = true }
+
+# Bridge dependencies
+
+bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false }
+bp-messages = { path = "../../primitives/messages", default-features = false }
+bp-runtime = { path = "../../primitives/runtime", default-features = false }
+bp-token-swap = { path = "../../primitives/token-swap", default-features = false }
+pallet-bridge-dispatch = { path = "../dispatch", default-features = false }
+pallet-bridge-messages = { path = "../messages", default-features = false }
+
+# Substrate Dependencies
+
+frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+[dev-dependencies]
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"bp-message-dispatch/std",
+	"bp-messages/std",
+	"bp-runtime/std",
+	"bp-token-swap/std",
+	"frame-support/std",
+	"frame-system/std",
+	"log/std",
+	"pallet-bridge-dispatch/std",
+	"pallet-bridge-messages/std",
+	"scale-info/std",
+	"serde",
+	"sp-core/std",
+	"sp-io/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
+runtime-benchmarks = [
+	"frame-benchmarking",
+]
diff --git a/polkadot/bridges/modules/token-swap/src/benchmarking.rs b/polkadot/bridges/modules/token-swap/src/benchmarking.rs
new file mode 100644
index 0000000000000000000000000000000000000000..bbc544a8b91dff1d35db2c3c55a2029d52d1f78f
--- /dev/null
+++ b/polkadot/bridges/modules/token-swap/src/benchmarking.rs
@@ -0,0 +1,195 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Token-swap pallet benchmarking.
+
+use crate::{
+	swap_account_id, target_account_at_this_chain, BridgedAccountIdOf, BridgedAccountPublicOf,
+	BridgedAccountSignatureOf, BridgedBalanceOf, Call, Pallet, ThisChainBalance,
+	TokenSwapCreationOf, TokenSwapOf,
+};
+
+use bp_token_swap::{TokenSwap, TokenSwapCreation, TokenSwapState, TokenSwapType};
+use codec::Encode;
+use frame_benchmarking::{account, benchmarks_instance_pallet};
+use frame_support::{traits::Currency, Parameter};
+use frame_system::RawOrigin;
+use sp_core::H256;
+use sp_io::hashing::blake2_256;
+use sp_runtime::traits::Bounded;
+use sp_std::vec::Vec;
+
+const SEED: u32 = 0;
+
+/// Trait that must be implemented by runtime.
+pub trait Config<I: 'static>: crate::Config<I> {
+	/// Initialize environment for token swap.
+	fn initialize_environment();
+}
+
+benchmarks_instance_pallet! {
+	where_clause {
+		where
+			BridgedAccountPublicOf<T, I>: Default + Parameter,
+			BridgedAccountSignatureOf<T, I>: Default,
+	}
+
+	//
+	// Benchmarks that are used directly by the runtime.
+	//
+
+	// Benchmark `create_swap` extrinsic.
+	//
+	// This benchmark assumes that message is **NOT** actually sent. Instead we're using `send_message_weight`
+	// from the `WeightInfoExt` trait.
+	//
+	// There aren't any factors that affect `create_swap` performance, so everything
+	// is straightforward here.
+	create_swap {
+		T::initialize_environment();
+
+		let sender = funded_account::<T, I>("source_account_at_this_chain", 0);
+		let swap: TokenSwapOf<T, I> = test_swap::<T, I>(sender.clone(), true);
+		let swap_creation: TokenSwapCreationOf<T, I> = test_swap_creation::<T, I>();
+	}: create_swap(
+		RawOrigin::Signed(sender.clone()),
+		swap,
+		Box::new(swap_creation)
+	)
+	verify {
+		assert!(crate::PendingSwaps::<T, I>::contains_key(test_swap_hash::<T, I>(sender, true)));
+	}
+
+	// Benchmark `claim_swap` extrinsic with the worst possible conditions:
+	//
+	// * swap is locked until some block, so current block number is read.
+	claim_swap {
+		T::initialize_environment();
+
+		let sender: T::AccountId = account("source_account_at_this_chain", 0, SEED);
+		crate::PendingSwaps::<T, I>::insert(
+			test_swap_hash::<T, I>(sender.clone(), false),
+			TokenSwapState::Confirmed,
+		);
+
+		let swap: TokenSwapOf<T, I> = test_swap::<T, I>(sender.clone(), false);
+		let claimer = target_account_at_this_chain::<T, I>(&swap);
+		let token_swap_account = swap_account_id::<T, I>(&swap);
+		T::ThisCurrency::make_free_balance_be(&token_swap_account, ThisChainBalance::<T, I>::max_value());
+	}: claim_swap(RawOrigin::Signed(claimer), swap)
+	verify {
+		assert!(!crate::PendingSwaps::<T, I>::contains_key(test_swap_hash::<T, I>(sender, false)));
+	}
+
+	// Benchmark `cancel_swap` extrinsic with the worst possible conditions:
+	//
+	// * swap is locked until some block, so current block number is read.
+	cancel_swap {
+		T::initialize_environment();
+
+		let sender: T::AccountId = account("source_account_at_this_chain", 0, SEED);
+		crate::PendingSwaps::<T, I>::insert(
+			test_swap_hash::<T, I>(sender.clone(), false),
+			TokenSwapState::Failed,
+		);
+
+		let swap: TokenSwapOf<T, I> = test_swap::<T, I>(sender.clone(), false);
+		let token_swap_account = swap_account_id::<T, I>(&swap);
+		T::ThisCurrency::make_free_balance_be(&token_swap_account, ThisChainBalance::<T, I>::max_value());
+
+	}: cancel_swap(RawOrigin::Signed(sender.clone()), swap)
+	verify {
+		assert!(!crate::PendingSwaps::<T, I>::contains_key(test_swap_hash::<T, I>(sender, false)));
+	}
+}
+
+/// Returns test token swap.
+fn test_swap<T: Config<I>, I: 'static>(sender: T::AccountId, is_create: bool) -> TokenSwapOf<T, I> {
+	TokenSwap {
+		swap_type: TokenSwapType::LockClaimUntilBlock(
+			if is_create { 10u32.into() } else { 0u32.into() },
+			0.into(),
+		),
+		source_balance_at_this_chain: source_balance_to_swap::<T, I>(),
+		source_account_at_this_chain: sender,
+		target_balance_at_bridged_chain: target_balance_to_swap::<T, I>(),
+		target_account_at_bridged_chain: target_account_at_bridged_chain::<T, I>(),
+	}
+}
+
+/// Returns test token swap hash.
+fn test_swap_hash<T: Config<I>, I: 'static>(sender: T::AccountId, is_create: bool) -> H256 {
+	test_swap::<T, I>(sender, is_create).using_encoded(blake2_256).into()
+}
+
+/// Returns test token swap creation params.
+fn test_swap_creation<T: Config<I>, I: 'static>() -> TokenSwapCreationOf<T, I>
+where
+	BridgedAccountPublicOf<T, I>: Default,
+	BridgedAccountSignatureOf<T, I>: Default,
+{
+	TokenSwapCreation {
+		target_public_at_bridged_chain: target_public_at_bridged_chain::<T, I>(),
+		swap_delivery_and_dispatch_fee: swap_delivery_and_dispatch_fee::<T, I>(),
+		bridged_chain_spec_version: 0,
+		bridged_currency_transfer: Vec::new(),
+		bridged_currency_transfer_weight: 0,
+		bridged_currency_transfer_signature: bridged_currency_transfer_signature::<T, I>(),
+	}
+}
+
+/// Account that has some balance.
+fn funded_account<T: Config<I>, I: 'static>(name: &'static str, index: u32) -> T::AccountId {
+	let account: T::AccountId = account(name, index, SEED);
+	T::ThisCurrency::make_free_balance_be(&account, ThisChainBalance::<T, I>::max_value());
+	account
+}
+
+/// Currency transfer message fee.
+fn swap_delivery_and_dispatch_fee<T: Config<I>, I: 'static>() -> ThisChainBalance<T, I> {
+	ThisChainBalance::<T, I>::max_value() / 4u32.into()
+}
+
+/// Balance at the source chain that we're going to swap.
+fn source_balance_to_swap<T: Config<I>, I: 'static>() -> ThisChainBalance<T, I> {
+	ThisChainBalance::<T, I>::max_value() / 2u32.into()
+}
+
+/// Balance at the target chain that we're going to swap.
+fn target_balance_to_swap<T: Config<I>, I: 'static>() -> BridgedBalanceOf<T, I> {
+	BridgedBalanceOf::<T, I>::max_value() / 2u32.into()
+}
+
+/// Public key of `target_account_at_bridged_chain`.
+fn target_public_at_bridged_chain<T: Config<I>, I: 'static>() -> BridgedAccountPublicOf<T, I>
+where
+	BridgedAccountPublicOf<T, I>: Default,
+{
+	Default::default()
+}
+
+/// Signature of `target_account_at_bridged_chain` over message.
+fn bridged_currency_transfer_signature<T: Config<I>, I: 'static>() -> BridgedAccountSignatureOf<T, I>
+where
+	BridgedAccountSignatureOf<T, I>: Default,
+{
+	Default::default()
+}
+
+/// Account at the bridged chain that is participating in the swap.
+fn target_account_at_bridged_chain<T: Config<I>, I: 'static>() -> BridgedAccountIdOf<T, I> {
+	Default::default()
+}
diff --git a/polkadot/bridges/modules/token-swap/src/lib.rs b/polkadot/bridges/modules/token-swap/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..43fa13ba4bdb869bccd82e9447004b7bd670257b
--- /dev/null
+++ b/polkadot/bridges/modules/token-swap/src/lib.rs
@@ -0,0 +1,1133 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Runtime module that allows token swap between two parties acting on different chains.
+//!
+//! The swap is made using message lanes between This (where `pallet-bridge-token-swap` pallet
+//! is deployed) and some other Bridged chain. No other assumptions about the Bridged chain are
+//! made, so we don't need it to have an instance of the `pallet-bridge-token-swap` pallet deployed.
+//!
+//! There are four accounts participating in the swap:
+//!
+//! 1) account of This chain that has signed the `create_swap` transaction and has balance on This
+//! chain. We'll be referring to this account as `source_account_at_this_chain`;
+//!
+//! 2) account of the Bridged chain that is sending the `claim_swap` message from the Bridged to
+//! This chain. This account has balance on Bridged chain and is willing to swap these tokens to
+//! This chain tokens of the `source_account_at_this_chain`. We'll be referring to this account
+//! as `target_account_at_bridged_chain`;
+//!
+//! 3) account of the Bridged chain that is indirectly controlled by the
+//! `source_account_at_this_chain`. We'll be referring to this account as
+//! `source_account_at_bridged_chain`;
+//!
+//! 4) account of This chain that is indirectly controlled by the `target_account_at_bridged_chain`.
+//! We'll be referring to this account as `target_account_at_this_chain`.
+//!
+//! So the tokens swap is an intention of `source_account_at_this_chain` to swap his
+//! `source_balance_at_this_chain` tokens to the `target_balance_at_bridged_chain` tokens owned by
+//! `target_account_at_bridged_chain`. The swap process goes as follows:
+//!
+//! 1) the `source_account_at_this_chain` account submits the `create_swap` transaction on This
+//! chain;
+//!
+//! 2) the tokens transfer message that would transfer `target_balance_at_bridged_chain`
+//! tokens from the `target_account_at_bridged_chain` to the `source_account_at_bridged_chain`,
+//! is sent over the bridge;
+//!
+//! 3) when transfer message is delivered and dispatched, the pallet receives notification;
+//!
+//! 4) if message has been successfully dispatched, the `target_account_at_bridged_chain` sends the
+//! message that would transfer `source_balance_at_this_chain` tokens to his
+//! `target_account_at_this_chain` account;
+//!
+//! 5) if message dispatch has failed, the `source_account_at_this_chain` may submit the
+//! `cancel_swap` transaction and return his `source_balance_at_this_chain` back to his account.
+//!
+//! While swap is pending, the `source_balance_at_this_chain` tokens are owned by the special
+//! temporary `swap_account_at_this_chain` account. It is destroyed upon swap completion.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use bp_messages::{
+	source_chain::{MessagesBridge, OnDeliveryConfirmed},
+	DeliveredMessages, LaneId, MessageNonce,
+};
+use bp_runtime::{messages::DispatchFeePayment, ChainId};
+use bp_token_swap::{
+	RawBridgedTransferCall, TokenSwap, TokenSwapCreation, TokenSwapState, TokenSwapType,
+};
+use codec::Encode;
+use frame_support::{
+	fail,
+	traits::{Currency, ExistenceRequirement},
+	weights::PostDispatchInfo,
+};
+use sp_core::H256;
+use sp_io::hashing::blake2_256;
+use sp_runtime::traits::{Convert, Saturating};
+use sp_std::boxed::Box;
+use weights::WeightInfo;
+
+pub use weights_ext::WeightInfoExt;
+
+#[cfg(test)]
+mod mock;
+
+#[cfg(feature = "runtime-benchmarks")]
+pub mod benchmarking;
+
+pub mod weights;
+pub mod weights_ext;
+
+pub use pallet::*;
+
+/// Name of the `PendingSwaps` storage map.
+pub const PENDING_SWAPS_MAP_NAME: &str = "PendingSwaps";
+
+// comes from #[pallet::event]
+#[allow(clippy::unused_unit)]
+#[frame_support::pallet]
+pub mod pallet {
+	use super::*;
+	use frame_support::pallet_prelude::*;
+	use frame_system::pallet_prelude::*;
+
+	#[pallet::config]
+	pub trait Config<I: 'static = ()>: frame_system::Config {
+		/// The overarching event type.
+		type Event: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::Event>;
+		/// Benchmarks results from runtime we're plugged into.
+		type WeightInfo: WeightInfoExt;
+
+		/// Id of the bridge with the Bridged chain.
+		type BridgedChainId: Get<ChainId>;
+		/// The identifier of outbound message lane on This chain used to send token transfer
+		/// messages to the Bridged chain.
+		///
+		/// It is highly recommended to use dedicated lane for every instance of token swap
+		/// pallet. Messages delivery confirmation callback is implemented in the way that
+		/// for every confirmed message, there is (at least) a storage read. Which means
+		/// that if the pallet sees unrelated confirmations, it'll just burn storage-read
+		/// weight, achieving nothing.
+		type OutboundMessageLaneId: Get<LaneId>;
+		/// Messages bridge with Bridged chain.
+		type MessagesBridge: MessagesBridge<
+			Self::AccountId,
+			<Self::ThisCurrency as Currency<Self::AccountId>>::Balance,
+			MessagePayloadOf<Self, I>,
+		>;
+
+		/// This chain Currency used in the tokens swap.
+		type ThisCurrency: Currency<Self::AccountId>;
+		/// Converter from raw hash (derived from swap) to This chain account.
+		type FromSwapToThisAccountIdConverter: Convert<H256, Self::AccountId>;
+
+		/// The chain we're bridged to.
+		type BridgedChain: bp_runtime::Chain;
+		/// Converter from raw hash (derived from Bridged chain account) to This chain account.
+		type FromBridgedToThisAccountIdConverter: Convert<H256, Self::AccountId>;
+	}
+
+	/// Tokens balance at This chain.
+	pub type ThisChainBalance<T, I> = <<T as Config<I>>::ThisCurrency as Currency<
+		<T as frame_system::Config>::AccountId,
+	>>::Balance;
+
+	/// Type of the Bridged chain.
+	pub type BridgedChainOf<T, I> = <T as Config<I>>::BridgedChain;
+	/// Tokens balance type at the Bridged chain.
+	pub type BridgedBalanceOf<T, I> = bp_runtime::BalanceOf<BridgedChainOf<T, I>>;
+	/// Account identifier type at the Bridged chain.
+	pub type BridgedAccountIdOf<T, I> = bp_runtime::AccountIdOf<BridgedChainOf<T, I>>;
+	/// Account public key type at the Bridged chain.
+	pub type BridgedAccountPublicOf<T, I> = bp_runtime::AccountPublicOf<BridgedChainOf<T, I>>;
+	/// Account signature type at the Bridged chain.
+	pub type BridgedAccountSignatureOf<T, I> = bp_runtime::SignatureOf<BridgedChainOf<T, I>>;
+
+	/// Bridge message payload used by the pallet.
+	pub type MessagePayloadOf<T, I> = bp_message_dispatch::MessagePayload<
+		<T as frame_system::Config>::AccountId,
+		BridgedAccountPublicOf<T, I>,
+		BridgedAccountSignatureOf<T, I>,
+		RawBridgedTransferCall,
+	>;
+	/// Type of `TokenSwap` used by the pallet.
+	pub type TokenSwapOf<T, I> = TokenSwap<
+		BlockNumberFor<T>,
+		ThisChainBalance<T, I>,
+		<T as frame_system::Config>::AccountId,
+		BridgedBalanceOf<T, I>,
+		BridgedAccountIdOf<T, I>,
+	>;
+	/// Type of `TokenSwapCreation` used by the pallet.
+	pub type TokenSwapCreationOf<T, I> = TokenSwapCreation<
+		BridgedAccountPublicOf<T, I>,
+		ThisChainBalance<T, I>,
+		BridgedAccountSignatureOf<T, I>,
+	>;
+
+	#[pallet::pallet]
+	#[pallet::generate_store(pub(super) trait Store)]
+	pub struct Pallet<T, I = ()>(PhantomData<(T, I)>);
+
+	#[pallet::hooks]
+	impl<T: Config<I>, I: 'static> Hooks<BlockNumberFor<T>> for Pallet<T, I> {}
+
+	#[pallet::call]
+	impl<T: Config<I>, I: 'static> Pallet<T, I>
+	where
+		BridgedAccountPublicOf<T, I>: Parameter,
+	{
+		/// Start token swap procedure.
+		///
+		/// The dispatch origin for this call must be exactly the
+		/// `swap.source_account_at_this_chain` account.
+		///
+		/// Method arguments are:
+		///
+		/// - `swap` - token swap intention;
+		/// - `swap_creation_params` - additional parameters required to start tokens swap.
+		///
+		/// The `source_account_at_this_chain` MUST have enough balance to cover both token swap and
+		/// message transfer. Message fee may be estimated using corresponding `OutboundLaneApi` of
+		/// This runtime.
+		///
+		/// **WARNING**: the submitter of this transaction is responsible for verifying:
+		///
+		/// 1) that the `swap_creation_params.bridged_currency_transfer` represents a valid token
+		/// transfer call that transfers `swap.target_balance_at_bridged_chain` to his
+		/// `swap.source_account_at_bridged_chain` account;
+		///
+		/// 2) that either the `swap.source_account_at_bridged_chain` already exists, or the
+		/// `swap.target_balance_at_bridged_chain` is above existential deposit of the Bridged
+		/// chain;
+		///
+		/// 3) the `swap_creation_params.target_public_at_bridged_chain` matches the
+		/// `swap.target_account_at_bridged_chain`;
+		///
+		/// 4) the `bridged_currency_transfer_signature` is valid and generated by the owner of
+		/// the `swap_creation_params.target_public_at_bridged_chain` account (read more
+		/// about [`CallOrigin::TargetAccount`]).
+		///
+		/// Violating rule#1 will lead to losing your `source_balance_at_this_chain` tokens.
+		/// Violating other rules will lead to losing message fees for this and other transactions +
+		/// losing fees for message transfer.
+		#[allow(clippy::boxed_local)]
+		#[pallet::weight(
+			T::WeightInfo::create_swap()
+				.saturating_add(T::WeightInfo::send_message_weight(
+					&&swap_creation_params.bridged_currency_transfer[..],
+					T::DbWeight::get(),
+				))
+			)]
+		pub fn create_swap(
+			origin: OriginFor<T>,
+			swap: TokenSwapOf<T, I>,
+			swap_creation_params: Box<TokenSwapCreationOf<T, I>>,
+		) -> DispatchResultWithPostInfo {
+			let TokenSwapCreation {
+				target_public_at_bridged_chain,
+				swap_delivery_and_dispatch_fee,
+				bridged_chain_spec_version,
+				bridged_currency_transfer,
+				bridged_currency_transfer_weight,
+				bridged_currency_transfer_signature,
+			} = *swap_creation_params;
+
+			// ensure that the `origin` is the same account that is mentioned in the `swap`
+			// intention
+			let origin_account = ensure_signed(origin)?;
+			ensure!(
+				origin_account == swap.source_account_at_this_chain,
+				Error::<T, I>::MismatchedSwapSourceOrigin,
+			);
+
+			// remember weight components
+			let base_weight = T::WeightInfo::create_swap();
+
+			// we can't exchange less than existential deposit (the temporary `swap_account` account
+			// won't be created then)
+			//
+			// the same can also happen with the `swap.bridged_balance`, but we can't check it
+			// here (without additional knowledge of the Bridged chain). So it is the `origin`
+			// responsibility to check that the swap is valid.
+			ensure!(
+				swap.source_balance_at_this_chain >= T::ThisCurrency::minimum_balance(),
+				Error::<T, I>::TooLowBalanceOnThisChain,
+			);
+
+			// if the swap is replay-protected, then we need to ensure that we have not yet
+			// passed the specified block
+			match swap.swap_type {
+				TokenSwapType::TemporaryTargetAccountAtBridgedChain => (),
+				TokenSwapType::LockClaimUntilBlock(block_number, _) => ensure!(
+					block_number >= frame_system::Pallet::<T>::block_number(),
+					Error::<T, I>::SwapPeriodIsFinished,
+				),
+			}
+
+			let swap_account = swap_account_id::<T, I>(&swap);
+			let actual_send_message_weight = frame_support::storage::with_transaction(|| {
+				// funds are transferred from This account to the temporary Swap account
+				let transfer_result = T::ThisCurrency::transfer(
+					&swap.source_account_at_this_chain,
+					&swap_account,
+					// saturating_add is ok here - otherwise we'd have a chain where a single
+					// holder owns all tokens
+					swap.source_balance_at_this_chain
+						.saturating_add(swap_delivery_and_dispatch_fee),
+					// if we allow the account to die, then it will be unable to `cancel_claim`
+					// if something goes wrong
+					ExistenceRequirement::KeepAlive,
+				);
+				if let Err(err) = transfer_result {
+					log::error!(
+						target: "runtime::bridge-token-swap",
+						"Failed to transfer This chain tokens for the swap {:?} to Swap account ({:?}): {:?}",
+						swap,
+						swap_account,
+						err,
+					);
+
+					return sp_runtime::TransactionOutcome::Rollback(Err(
+						Error::<T, I>::FailedToTransferToSwapAccount,
+					))
+				}
+
+				// the transfer message is sent over the bridge. The message is supposed to be a
+				// `Currency::transfer` call on the bridged chain, but no checks are made - it is
+				// up to the transaction submitter to ensure it is valid.
+				let send_message_result = T::MessagesBridge::send_message(
+					bp_messages::source_chain::Sender::from(Some(swap_account.clone())),
+					T::OutboundMessageLaneId::get(),
+					bp_message_dispatch::MessagePayload {
+						spec_version: bridged_chain_spec_version,
+						weight: bridged_currency_transfer_weight,
+						origin: bp_message_dispatch::CallOrigin::TargetAccount(
+							swap_account,
+							target_public_at_bridged_chain,
+							bridged_currency_transfer_signature,
+						),
+						dispatch_fee_payment: DispatchFeePayment::AtTargetChain,
+						call: bridged_currency_transfer,
+					},
+					swap_delivery_and_dispatch_fee,
+				);
+				let sent_message = match send_message_result {
+					Ok(sent_message) => sent_message,
+					Err(err) => {
+						log::error!(
+							target: "runtime::bridge-token-swap",
+							"Failed to send token transfer message for swap {:?} to the Bridged chain: {:?}",
+							swap,
+							err,
+						);
+
+						return sp_runtime::TransactionOutcome::Rollback(Err(
+							Error::<T, I>::FailedToSendTransferMessage,
+						))
+					},
+				};
+
+				// remember that we have started the swap
+				let swap_hash = swap.using_encoded(blake2_256).into();
+				let insert_swap_result =
+					PendingSwaps::<T, I>::try_mutate(swap_hash, |maybe_state| {
+						if maybe_state.is_some() {
+							return Err(())
+						}
+
+						*maybe_state = Some(TokenSwapState::Started);
+						Ok(())
+					});
+				if insert_swap_result.is_err() {
+					log::error!(
+						target: "runtime::bridge-token-swap",
+						"Failed to start token swap {:?}: the swap is already started",
+						swap,
+					);
+
+					return sp_runtime::TransactionOutcome::Rollback(Err(
+						Error::<T, I>::SwapAlreadyStarted,
+					))
+				}
+
+				log::trace!(
+					target: "runtime::bridge-token-swap",
+					"The swap {:?} (hash {:?}) has been started",
+					swap,
+					swap_hash,
+				);
+
+				// remember that we're waiting for the transfer message delivery confirmation
+				PendingMessages::<T, I>::insert(sent_message.nonce, swap_hash);
+
+				// finally - emit the event
+				Self::deposit_event(Event::SwapStarted(swap_hash, sent_message.nonce));
+
+				sp_runtime::TransactionOutcome::Commit(Ok(sent_message.weight))
+			})?;
+
+			Ok(PostDispatchInfo {
+				actual_weight: Some(base_weight.saturating_add(actual_send_message_weight)),
+				pays_fee: Pays::Yes,
+			})
+		}
+
+		/// Claim previously reserved `source_balance_at_this_chain` by
+		/// `target_account_at_this_chain`.
+		///
+		/// **WARNING**: the correct way to call this function is to call it over the messages
+		/// bridge with dispatch origin set to
+		/// `pallet_bridge_dispatch::CallOrigin::SourceAccount(target_account_at_bridged_chain)`.
+		///
+		/// This should be called only when successful transfer confirmation has been received.
+		#[pallet::weight(T::WeightInfo::claim_swap())]
+		pub fn claim_swap(
+			origin: OriginFor<T>,
+			swap: TokenSwapOf<T, I>,
+		) -> DispatchResultWithPostInfo {
+			// ensure that the `origin` is controlled by the `swap.target_account_at_bridged_chain`
+			let origin_account = ensure_signed(origin)?;
+			let target_account_at_this_chain = target_account_at_this_chain::<T, I>(&swap);
+			ensure!(origin_account == target_account_at_this_chain, Error::<T, I>::InvalidClaimant,);
+
+			// ensure that the swap is confirmed
+			let swap_hash = swap.using_encoded(blake2_256).into();
+			let swap_state = PendingSwaps::<T, I>::get(swap_hash);
+			match swap_state {
+				Some(TokenSwapState::Started) => fail!(Error::<T, I>::SwapIsPending),
+				Some(TokenSwapState::Confirmed) => {
+					let is_claim_allowed = match swap.swap_type {
+						TokenSwapType::TemporaryTargetAccountAtBridgedChain => true,
+						TokenSwapType::LockClaimUntilBlock(block_number, _) =>
+							block_number < frame_system::Pallet::<T>::block_number(),
+					};
+
+					ensure!(is_claim_allowed, Error::<T, I>::SwapIsTemporaryLocked);
+				},
+				Some(TokenSwapState::Failed) => fail!(Error::<T, I>::SwapIsFailed),
+				None => fail!(Error::<T, I>::SwapIsInactive),
+			}
+
+			complete_claim::<T, I>(swap, swap_hash, origin_account, Event::SwapClaimed(swap_hash))
+		}
+
+		/// Return previously reserved `source_balance_at_this_chain` back to the
+		/// `source_account_at_this_chain`.
+		///
+		/// This should be called only when transfer has failed at Bridged chain and we have
+		/// received notification about that.
+		#[pallet::weight(T::WeightInfo::cancel_swap())]
+		pub fn cancel_swap(
+			origin: OriginFor<T>,
+			swap: TokenSwapOf<T, I>,
+		) -> DispatchResultWithPostInfo {
+			// ensure that the `origin` is the same account that is mentioned in the `swap`
+			// intention
+			let origin_account = ensure_signed(origin)?;
+			ensure!(
+				origin_account == swap.source_account_at_this_chain,
+				Error::<T, I>::MismatchedSwapSourceOrigin,
+			);
+
+			// ensure that the swap has failed
+			let swap_hash = swap.using_encoded(blake2_256).into();
+			let swap_state = PendingSwaps::<T, I>::get(swap_hash);
+			match swap_state {
+				Some(TokenSwapState::Started) => fail!(Error::<T, I>::SwapIsPending),
+				Some(TokenSwapState::Confirmed) => fail!(Error::<T, I>::SwapIsConfirmed),
+				Some(TokenSwapState::Failed) => {
+					// we allow canceling swap even before lock period is over - the
+					// `source_account_at_this_chain` has already paid for nothing and it is up to
+					// them to decide whether they want to try again
+				},
+				None => fail!(Error::<T, I>::SwapIsInactive),
+			}
+
+			complete_claim::<T, I>(swap, swap_hash, origin_account, Event::SwapCanceled(swap_hash))
+		}
+	}
+
+	#[pallet::event]
+	#[pallet::generate_deposit(pub(super) fn deposit_event)]
+	pub enum Event<T: Config<I>, I: 'static = ()> {
+		/// Tokens swap has been started and the message has been sent to the bridged chain.
+		///
+		/// The payload is the swap hash and the transfer message nonce.
+		SwapStarted(H256, MessageNonce),
+		/// Token swap has been claimed.
+		SwapClaimed(H256),
+		/// Token swap has been canceled.
+		SwapCanceled(H256),
+	}
+
+	#[pallet::error]
+	pub enum Error<T, I = ()> {
+		/// The account that has submitted the `create_swap` doesn't match the
+		/// `TokenSwap::source_account_at_this_chain`.
+		MismatchedSwapSourceOrigin,
+		/// The swap balance in This chain tokens is below existential deposit and can't be made.
+		TooLowBalanceOnThisChain,
+		/// Transfer from This chain account to temporary Swap account has failed.
+		FailedToTransferToSwapAccount,
+		/// Transfer from the temporary Swap account to the derived account of Bridged account has
+		/// failed.
+		FailedToTransferFromSwapAccount,
+		/// The message to transfer tokens on Target chain can't be sent.
+		FailedToSendTransferMessage,
+		/// The same swap is already started.
+		SwapAlreadyStarted,
+		/// Swap outcome is not yet received.
+		SwapIsPending,
+		/// Someone is trying to claim swap that has failed.
+		SwapIsFailed,
+		/// Claiming swap is not allowed.
+		///
+		/// Currently, the only case when you may get this error is when you're trying to claim
+		/// swap with `TokenSwapType::LockClaimUntilBlock` before lock period is over.
+		SwapIsTemporaryLocked,
+		/// Swap period is finished and you can not restart it.
+		///
+		/// Currently, the only case when you may get this error is when you're trying to start
+		/// swap with `TokenSwapType::LockClaimUntilBlock` after lock period is over.
+		SwapPeriodIsFinished,
+		/// Someone is trying to cancel swap that has been confirmed.
+		SwapIsConfirmed,
+		/// Someone is trying to claim/cancel swap that is either not started or already
+		/// claimed/canceled.
+		SwapIsInactive,
+		/// The swap claimant is invalid.
+		InvalidClaimant,
+	}
+
+	/// Pending token swaps states.
+	#[pallet::storage]
+	pub type PendingSwaps<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, H256, TokenSwapState>;
+
+	/// Pending transfer messages.
+	#[pallet::storage]
+	pub type PendingMessages<T: Config<I>, I: 'static = ()> =
+		StorageMap<_, Identity, MessageNonce, H256>;
+
+	impl<T: Config<I>, I: 'static> OnDeliveryConfirmed for Pallet<T, I> {
+		fn on_messages_delivered(lane: &LaneId, delivered_messages: &DeliveredMessages) -> Weight {
+			// we're only interested in our lane messages
+			if *lane != T::OutboundMessageLaneId::get() {
+				return 0
+			}
+
+			// so now we're dealing with our lane messages. Ideally we'd have a dedicated lane
+			// and every message from `delivered_messages` is actually our transfer message.
+			// But it may be some shared lane (which is not recommended).
+			let mut reads = 0;
+			let mut writes = 0;
+			for message_nonce in delivered_messages.begin..=delivered_messages.end {
+				reads += 1;
+				if let Some(swap_hash) = PendingMessages::<T, I>::take(message_nonce) {
+					writes += 1;
+
+					let token_swap_state =
+						if delivered_messages.message_dispatch_result(message_nonce) {
+							TokenSwapState::Confirmed
+						} else {
+							TokenSwapState::Failed
+						};
+
+					log::trace!(
+						target: "runtime::bridge-token-swap",
+						"The dispatch of swap {:?} has been completed with {:?} status",
+						swap_hash,
+						token_swap_state,
+					);
+
+					PendingSwaps::<T, I>::insert(swap_hash, token_swap_state);
+				}
+			}
+
+			<T as frame_system::Config>::DbWeight::get().reads_writes(reads, writes)
+		}
+	}
+
+	/// Returns temporary account id used to lock funds during swap on This chain.
+	pub(crate) fn swap_account_id<T: Config<I>, I: 'static>(
+		swap: &TokenSwapOf<T, I>,
+	) -> T::AccountId {
+		T::FromSwapToThisAccountIdConverter::convert(swap.using_encoded(blake2_256).into())
+	}
+
+	/// Expected target account representation on This chain (aka `target_account_at_this_chain`).
+	pub(crate) fn target_account_at_this_chain<T: Config<I>, I: 'static>(
+		swap: &TokenSwapOf<T, I>,
+	) -> T::AccountId {
+		T::FromBridgedToThisAccountIdConverter::convert(bp_runtime::derive_account_id(
+			T::BridgedChainId::get(),
+			bp_runtime::SourceAccount::Account(swap.target_account_at_bridged_chain.clone()),
+		))
+	}
+
+	/// Complete claim with given outcome.
+	pub(crate) fn complete_claim<T: Config<I>, I: 'static>(
+		swap: TokenSwapOf<T, I>,
+		swap_hash: H256,
+		destination_account: T::AccountId,
+		event: Event<T, I>,
+	) -> DispatchResultWithPostInfo {
+		let swap_account = swap_account_id::<T, I>(&swap);
+		frame_support::storage::with_transaction(|| {
+			// funds are transferred from the temporary Swap account to the destination account
+			let transfer_result = T::ThisCurrency::transfer(
+				&swap_account,
+				&destination_account,
+				swap.source_balance_at_this_chain,
+				ExistenceRequirement::AllowDeath,
+			);
+			if let Err(err) = transfer_result {
+				log::error!(
+					target: "runtime::bridge-token-swap",
+					"Failed to transfer This chain tokens for the swap {:?} from the Swap account {:?} to {:?}: {:?}",
+					swap,
+					swap_account,
+					destination_account,
+					err,
+				);
+
+				return sp_runtime::TransactionOutcome::Rollback(Err(
+					Error::<T, I>::FailedToTransferFromSwapAccount.into(),
+				))
+			}
+
+			log::trace!(
+				target: "runtime::bridge-token-swap",
+				"The swap {:?} (hash {:?}) has been completed with {} status",
+				swap,
+				swap_hash,
+				match event {
+					Event::SwapClaimed(_) => "claimed",
+					Event::SwapCanceled(_) => "canceled",
+					_ => "<unknown>",
+				},
+			);
+
+			// forget about swap
+			PendingSwaps::<T, I>::remove(swap_hash);
+
+			// finally - emit the event
+			Pallet::<T, I>::deposit_event(event);
+
+			sp_runtime::TransactionOutcome::Commit(Ok(().into()))
+		})
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::mock::*;
+	use frame_support::{assert_noop, assert_ok};
+
+	const CAN_START_BLOCK_NUMBER: u64 = 10;
+	const CAN_CLAIM_BLOCK_NUMBER: u64 = CAN_START_BLOCK_NUMBER + 1;
+
+	const BRIDGED_CHAIN_ACCOUNT: BridgedAccountId = 3;
+	const BRIDGED_CHAIN_SPEC_VERSION: u32 = 4;
+	const BRIDGED_CHAIN_CALL_WEIGHT: Balance = 5;
+
+	fn bridged_chain_account_public() -> BridgedAccountPublic {
+		1.into()
+	}
+
+	fn bridged_chain_account_signature() -> BridgedAccountSignature {
+		sp_runtime::testing::TestSignature(2, Vec::new())
+	}
+
+	fn test_swap() -> TokenSwapOf<TestRuntime, ()> {
+		bp_token_swap::TokenSwap {
+			swap_type: TokenSwapType::LockClaimUntilBlock(CAN_START_BLOCK_NUMBER, 0.into()),
+			source_balance_at_this_chain: 100,
+			source_account_at_this_chain: THIS_CHAIN_ACCOUNT,
+			target_balance_at_bridged_chain: 200,
+			target_account_at_bridged_chain: BRIDGED_CHAIN_ACCOUNT,
+		}
+	}
+
+	fn test_swap_creation() -> TokenSwapCreationOf<TestRuntime, ()> {
+		TokenSwapCreation {
+			target_public_at_bridged_chain: bridged_chain_account_public(),
+			swap_delivery_and_dispatch_fee: SWAP_DELIVERY_AND_DISPATCH_FEE,
+			bridged_chain_spec_version: BRIDGED_CHAIN_SPEC_VERSION,
+			bridged_currency_transfer: test_transfer(),
+			bridged_currency_transfer_weight: BRIDGED_CHAIN_CALL_WEIGHT,
+			bridged_currency_transfer_signature: bridged_chain_account_signature(),
+		}
+	}
+
+	fn test_swap_hash() -> H256 {
+		test_swap().using_encoded(blake2_256).into()
+	}
+
+	fn test_transfer() -> RawBridgedTransferCall {
+		vec![OK_TRANSFER_CALL]
+	}
+
+	fn start_test_swap() {
+		assert_ok!(Pallet::<TestRuntime>::create_swap(
+			Origin::signed(THIS_CHAIN_ACCOUNT),
+			test_swap(),
+			Box::new(TokenSwapCreation {
+				target_public_at_bridged_chain: bridged_chain_account_public(),
+				swap_delivery_and_dispatch_fee: SWAP_DELIVERY_AND_DISPATCH_FEE,
+				bridged_chain_spec_version: BRIDGED_CHAIN_SPEC_VERSION,
+				bridged_currency_transfer: test_transfer(),
+				bridged_currency_transfer_weight: BRIDGED_CHAIN_CALL_WEIGHT,
+				bridged_currency_transfer_signature: bridged_chain_account_signature(),
+			}),
+		));
+	}
+
+	fn receive_test_swap_confirmation(success: bool) {
+		Pallet::<TestRuntime, ()>::on_messages_delivered(
+			&OutboundMessageLaneId::get(),
+			&DeliveredMessages::new(MESSAGE_NONCE, success),
+		);
+	}
+
+	#[test]
+	fn create_swap_fails_if_origin_is_incorrect() {
+		run_test(|| {
+			assert_noop!(
+				Pallet::<TestRuntime>::create_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT + 1),
+					test_swap(),
+					Box::new(test_swap_creation()),
+				),
+				Error::<TestRuntime, ()>::MismatchedSwapSourceOrigin
+			);
+		});
+	}
+
+	#[test]
+	fn create_swap_fails_if_this_chain_balance_is_below_existential_deposit() {
+		run_test(|| {
+			let mut swap = test_swap();
+			swap.source_balance_at_this_chain = ExistentialDeposit::get() - 1;
+			assert_noop!(
+				Pallet::<TestRuntime>::create_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT),
+					swap,
+					Box::new(test_swap_creation()),
+				),
+				Error::<TestRuntime, ()>::TooLowBalanceOnThisChain
+			);
+		});
+	}
+
+	#[test]
+	fn create_swap_fails_if_currency_transfer_to_swap_account_fails() {
+		run_test(|| {
+			let mut swap = test_swap();
+			swap.source_balance_at_this_chain = THIS_CHAIN_ACCOUNT_BALANCE + 1;
+			assert_noop!(
+				Pallet::<TestRuntime>::create_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT),
+					swap,
+					Box::new(test_swap_creation()),
+				),
+				Error::<TestRuntime, ()>::FailedToTransferToSwapAccount
+			);
+		});
+	}
+
+	#[test]
+	fn create_swap_fails_if_send_message_fails() {
+		run_test(|| {
+			let mut transfer = test_transfer();
+			transfer[0] = BAD_TRANSFER_CALL;
+			let mut swap_creation = test_swap_creation();
+			swap_creation.bridged_currency_transfer = transfer;
+			assert_noop!(
+				Pallet::<TestRuntime>::create_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT),
+					test_swap(),
+					Box::new(swap_creation),
+				),
+				Error::<TestRuntime, ()>::FailedToSendTransferMessage
+			);
+		});
+	}
+
+	#[test]
+	fn create_swap_fails_if_swap_is_active() {
+		run_test(|| {
+			assert_ok!(Pallet::<TestRuntime>::create_swap(
+				Origin::signed(THIS_CHAIN_ACCOUNT),
+				test_swap(),
+				Box::new(test_swap_creation()),
+			));
+
+			assert_noop!(
+				Pallet::<TestRuntime>::create_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT),
+					test_swap(),
+					Box::new(test_swap_creation()),
+				),
+				Error::<TestRuntime, ()>::SwapAlreadyStarted
+			);
+		});
+	}
+
+	#[test]
+	fn create_swap_fails_if_trying_to_start_swap_after_lock_period_is_finished() {
+		run_test(|| {
+			frame_system::Pallet::<TestRuntime>::set_block_number(CAN_START_BLOCK_NUMBER + 1);
+			assert_noop!(
+				Pallet::<TestRuntime>::create_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT),
+					test_swap(),
+					Box::new(test_swap_creation()),
+				),
+				Error::<TestRuntime, ()>::SwapPeriodIsFinished
+			);
+		});
+	}
+
+	#[test]
+	fn create_swap_succeeds_if_trying_to_start_swap_at_lock_period_end() {
+		run_test(|| {
+			frame_system::Pallet::<TestRuntime>::set_block_number(CAN_START_BLOCK_NUMBER);
+			assert_ok!(Pallet::<TestRuntime>::create_swap(
+				Origin::signed(THIS_CHAIN_ACCOUNT),
+				test_swap(),
+				Box::new(test_swap_creation()),
+			));
+		});
+	}
+
+	#[test]
+	fn create_swap_succeeds() {
+		run_test(|| {
+			frame_system::Pallet::<TestRuntime>::set_block_number(1);
+			frame_system::Pallet::<TestRuntime>::reset_events();
+
+			assert_ok!(Pallet::<TestRuntime>::create_swap(
+				Origin::signed(THIS_CHAIN_ACCOUNT),
+				test_swap(),
+				Box::new(test_swap_creation()),
+			));
+
+			let swap_hash = test_swap_hash();
+			assert_eq!(PendingSwaps::<TestRuntime>::get(swap_hash), Some(TokenSwapState::Started));
+			assert_eq!(PendingMessages::<TestRuntime>::get(MESSAGE_NONCE), Some(swap_hash));
+			assert_eq!(
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<
+					TestRuntime,
+					(),
+				>(&test_swap())),
+				test_swap().source_balance_at_this_chain + SWAP_DELIVERY_AND_DISPATCH_FEE,
+			);
+			assert!(
+				frame_system::Pallet::<TestRuntime>::events().iter().any(|e| e.event ==
+					crate::mock::Event::TokenSwap(crate::Event::SwapStarted(
+						swap_hash,
+						MESSAGE_NONCE,
+					))),
+				"Missing SwapStarted event: {:?}",
+				frame_system::Pallet::<TestRuntime>::events(),
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_fails_if_origin_is_incorrect() {
+		run_test(|| {
+			assert_noop!(
+				Pallet::<TestRuntime>::claim_swap(
+					Origin::signed(
+						1 + target_account_at_this_chain::<TestRuntime, ()>(&test_swap())
+					),
+					test_swap(),
+				),
+				Error::<TestRuntime, ()>::InvalidClaimant
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_fails_if_swap_is_pending() {
+		run_test(|| {
+			PendingSwaps::<TestRuntime, ()>::insert(test_swap_hash(), TokenSwapState::Started);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::claim_swap(
+					Origin::signed(target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+					test_swap(),
+				),
+				Error::<TestRuntime, ()>::SwapIsPending
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_fails_if_swap_is_failed() {
+		run_test(|| {
+			PendingSwaps::<TestRuntime, ()>::insert(test_swap_hash(), TokenSwapState::Failed);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::claim_swap(
+					Origin::signed(target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+					test_swap(),
+				),
+				Error::<TestRuntime, ()>::SwapIsFailed
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_fails_if_swap_is_inactive() {
+		run_test(|| {
+			assert_noop!(
+				Pallet::<TestRuntime>::claim_swap(
+					Origin::signed(target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+					test_swap(),
+				),
+				Error::<TestRuntime, ()>::SwapIsInactive
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_fails_if_currency_transfer_from_swap_account_fails() {
+		run_test(|| {
+			frame_system::Pallet::<TestRuntime>::set_block_number(CAN_CLAIM_BLOCK_NUMBER);
+			PendingSwaps::<TestRuntime, ()>::insert(test_swap_hash(), TokenSwapState::Confirmed);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::claim_swap(
+					Origin::signed(target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+					test_swap(),
+				),
+				Error::<TestRuntime, ()>::FailedToTransferFromSwapAccount
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_fails_before_lock_period_is_completed() {
+		run_test(|| {
+			start_test_swap();
+			receive_test_swap_confirmation(true);
+
+			frame_system::Pallet::<TestRuntime>::set_block_number(CAN_CLAIM_BLOCK_NUMBER - 1);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::claim_swap(
+					Origin::signed(target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+					test_swap(),
+				),
+				Error::<TestRuntime, ()>::SwapIsTemporaryLocked
+			);
+		});
+	}
+
+	#[test]
+	fn claim_swap_succeeds() {
+		run_test(|| {
+			start_test_swap();
+			receive_test_swap_confirmation(true);
+
+			frame_system::Pallet::<TestRuntime>::set_block_number(CAN_CLAIM_BLOCK_NUMBER);
+			frame_system::Pallet::<TestRuntime>::reset_events();
+
+			assert_ok!(Pallet::<TestRuntime>::claim_swap(
+				Origin::signed(target_account_at_this_chain::<TestRuntime, ()>(&test_swap())),
+				test_swap(),
+			));
+
+			let swap_hash = test_swap_hash();
+			assert_eq!(PendingSwaps::<TestRuntime>::get(swap_hash), None);
+			assert_eq!(
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<
+					TestRuntime,
+					(),
+				>(&test_swap())),
+				0,
+			);
+			assert_eq!(
+				pallet_balances::Pallet::<TestRuntime>::free_balance(
+					&target_account_at_this_chain::<TestRuntime, ()>(&test_swap()),
+				),
+				test_swap().source_balance_at_this_chain,
+			);
+			assert!(
+				frame_system::Pallet::<TestRuntime>::events().iter().any(|e| e.event ==
+					crate::mock::Event::TokenSwap(crate::Event::SwapClaimed(swap_hash,))),
+				"Missing SwapClaimed event: {:?}",
+				frame_system::Pallet::<TestRuntime>::events(),
+			);
+		});
+	}
+
+	#[test]
+	fn cancel_swap_fails_if_origin_is_incorrect() {
+		run_test(|| {
+			start_test_swap();
+			receive_test_swap_confirmation(false);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::cancel_swap(
+					Origin::signed(THIS_CHAIN_ACCOUNT + 1),
+					test_swap()
+				),
+				Error::<TestRuntime, ()>::MismatchedSwapSourceOrigin
+			);
+		});
+	}
+
+	#[test]
+	fn cancel_swap_fails_if_swap_is_pending() {
+		run_test(|| {
+			start_test_swap();
+
+			assert_noop!(
+				Pallet::<TestRuntime>::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()),
+				Error::<TestRuntime, ()>::SwapIsPending
+			);
+		});
+	}
+
+	#[test]
+	fn cancel_swap_fails_if_swap_is_confirmed() {
+		run_test(|| {
+			start_test_swap();
+			receive_test_swap_confirmation(true);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()),
+				Error::<TestRuntime, ()>::SwapIsConfirmed
+			);
+		});
+	}
+
+	#[test]
+	fn cancel_swap_fails_if_swap_is_inactive() {
+		run_test(|| {
+			assert_noop!(
+				Pallet::<TestRuntime>::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()),
+				Error::<TestRuntime, ()>::SwapIsInactive
+			);
+		});
+	}
+
+	#[test]
+	fn cancel_swap_fails_if_currency_transfer_from_swap_account_fails() {
+		run_test(|| {
+			start_test_swap();
+			receive_test_swap_confirmation(false);
+			let _ = pallet_balances::Pallet::<TestRuntime>::slash(
+				&swap_account_id::<TestRuntime, ()>(&test_swap()),
+				test_swap().source_balance_at_this_chain,
+			);
+
+			assert_noop!(
+				Pallet::<TestRuntime>::cancel_swap(Origin::signed(THIS_CHAIN_ACCOUNT), test_swap()),
+				Error::<TestRuntime, ()>::FailedToTransferFromSwapAccount
+			);
+		});
+	}
+
+	#[test]
+	fn cancel_swap_succeeds() {
+		run_test(|| {
+			start_test_swap();
+			receive_test_swap_confirmation(false);
+
+			frame_system::Pallet::<TestRuntime>::set_block_number(1);
+			frame_system::Pallet::<TestRuntime>::reset_events();
+
+			assert_ok!(Pallet::<TestRuntime>::cancel_swap(
+				Origin::signed(THIS_CHAIN_ACCOUNT),
+				test_swap()
+			));
+
+			let swap_hash = test_swap_hash();
+			assert_eq!(PendingSwaps::<TestRuntime>::get(swap_hash), None);
+			assert_eq!(
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&swap_account_id::<
+					TestRuntime,
+					(),
+				>(&test_swap())),
+				0,
+			);
+			assert_eq!(
+				pallet_balances::Pallet::<TestRuntime>::free_balance(&THIS_CHAIN_ACCOUNT),
+				THIS_CHAIN_ACCOUNT_BALANCE - SWAP_DELIVERY_AND_DISPATCH_FEE,
+			);
+			assert!(
+				frame_system::Pallet::<TestRuntime>::events().iter().any(|e| e.event ==
+					crate::mock::Event::TokenSwap(crate::Event::SwapCanceled(swap_hash,))),
+				"Missing SwapCanceled event: {:?}",
+				frame_system::Pallet::<TestRuntime>::events(),
+			);
+		});
+	}
+
+	#[test]
+	fn messages_delivery_confirmations_are_accepted() {
+		run_test(|| {
+			start_test_swap();
+			assert_eq!(
+				PendingMessages::<TestRuntime, ()>::get(MESSAGE_NONCE),
+				Some(test_swap_hash())
+			);
+			assert_eq!(
+				PendingSwaps::<TestRuntime, ()>::get(test_swap_hash()),
+				Some(TokenSwapState::Started)
+			);
+
+			// when unrelated messages are delivered
+			let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 2, true);
+			messages.note_dispatched_message(false);
+			Pallet::<TestRuntime, ()>::on_messages_delivered(
+				&OutboundMessageLaneId::get(),
+				&messages,
+			);
+			assert_eq!(
+				PendingMessages::<TestRuntime, ()>::get(MESSAGE_NONCE),
+				Some(test_swap_hash())
+			);
+			assert_eq!(
+				PendingSwaps::<TestRuntime, ()>::get(test_swap_hash()),
+				Some(TokenSwapState::Started)
+			);
+
+			// when message we're interested in is accompanied by a bunch of other messages
+			let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 1, false);
+			messages.note_dispatched_message(true);
+			messages.note_dispatched_message(false);
+			Pallet::<TestRuntime, ()>::on_messages_delivered(
+				&OutboundMessageLaneId::get(),
+				&messages,
+			);
+			assert_eq!(PendingMessages::<TestRuntime, ()>::get(MESSAGE_NONCE), None);
+			assert_eq!(
+				PendingSwaps::<TestRuntime, ()>::get(test_swap_hash()),
+				Some(TokenSwapState::Confirmed)
+			);
+		});
+	}
+}
diff --git a/polkadot/bridges/modules/token-swap/src/mock.rs b/polkadot/bridges/modules/token-swap/src/mock.rs
new file mode 100644
index 0000000000000000000000000000000000000000..63edb323e1a4c85f350ad6cbf8d625ec6a19d38d
--- /dev/null
+++ b/polkadot/bridges/modules/token-swap/src/mock.rs
@@ -0,0 +1,187 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use crate as pallet_bridge_token_swap;
+use crate::MessagePayloadOf;
+
+use bp_messages::{
+	source_chain::{MessagesBridge, SendMessageArtifacts},
+	LaneId, MessageNonce,
+};
+use bp_runtime::ChainId;
+use frame_support::weights::Weight;
+use sp_core::H256;
+use sp_runtime::{
+	testing::Header as SubstrateHeader,
+	traits::{BlakeTwo256, IdentityLookup},
+	Perbill,
+};
+
+pub type AccountId = u64;
+pub type Balance = u64;
+pub type Block = frame_system::mocking::MockBlock<TestRuntime>;
+pub type BridgedAccountId = u64;
+pub type BridgedAccountPublic = sp_runtime::testing::UintAuthorityId;
+pub type BridgedAccountSignature = sp_runtime::testing::TestSignature;
+pub type BridgedBalance = u64;
+pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic<TestRuntime>;
+
+pub const OK_TRANSFER_CALL: u8 = 1;
+pub const BAD_TRANSFER_CALL: u8 = 2;
+pub const MESSAGE_NONCE: MessageNonce = 3;
+
+pub const THIS_CHAIN_ACCOUNT: AccountId = 1;
+pub const THIS_CHAIN_ACCOUNT_BALANCE: Balance = 100_000;
+
+pub const SWAP_DELIVERY_AND_DISPATCH_FEE: Balance = 1;
+
+frame_support::construct_runtime! {
+	pub enum TestRuntime where
+		Block = Block,
+		NodeBlock = Block,
+		UncheckedExtrinsic = UncheckedExtrinsic,
+	{
+		System: frame_system::{Pallet, Call, Config, Storage, Event<T>},
+		Balances: pallet_balances::{Pallet, Call, Event<T>},
+		TokenSwap: pallet_bridge_token_swap::{Pallet, Call, Event<T>},
+	}
+}
+
+frame_support::parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+	pub const MaximumBlockWeight: Weight = 1024;
+	pub const MaximumBlockLength: u32 = 2 * 1024;
+	pub const AvailableBlockRatio: Perbill = Perbill::one();
+}
+
+impl frame_system::Config for TestRuntime {
+	type Origin = Origin;
+	type Index = u64;
+	type Call = Call;
+	type BlockNumber = u64;
+	type Hash = H256;
+	type Hashing = BlakeTwo256;
+	type AccountId = AccountId;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = SubstrateHeader;
+	type Event = Event;
+	type BlockHashCount = BlockHashCount;
+	type Version = ();
+	type PalletInfo = PalletInfo;
+	type AccountData = pallet_balances::AccountData<Balance>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+	type BaseCallFilter = frame_support::traits::Everything;
+	type SystemWeightInfo = ();
+	type BlockWeights = ();
+	type BlockLength = ();
+	type DbWeight = ();
+	type SS58Prefix = ();
+	type OnSetCode = ();
+}
+
+frame_support::parameter_types! {
+	pub const ExistentialDeposit: u64 = 10;
+	pub const MaxReserves: u32 = 50;
+}
+
+impl pallet_balances::Config for TestRuntime {
+	type MaxLocks = ();
+	type Balance = Balance;
+	type DustRemoval = ();
+	type Event = Event;
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = frame_system::Pallet<TestRuntime>;
+	type WeightInfo = ();
+	type MaxReserves = MaxReserves;
+	type ReserveIdentifier = [u8; 8];
+}
+
+frame_support::parameter_types! {
+	pub const BridgedChainId: ChainId = *b"inst";
+	pub const OutboundMessageLaneId: LaneId = *b"lane";
+}
+
+impl pallet_bridge_token_swap::Config for TestRuntime {
+	type Event = Event;
+	type WeightInfo = ();
+
+	type BridgedChainId = BridgedChainId;
+	type OutboundMessageLaneId = OutboundMessageLaneId;
+	type MessagesBridge = TestMessagesBridge;
+
+	type ThisCurrency = pallet_balances::Pallet<TestRuntime>;
+	type FromSwapToThisAccountIdConverter = TestAccountConverter;
+
+	type BridgedChain = BridgedChain;
+	type FromBridgedToThisAccountIdConverter = TestAccountConverter;
+}
+
+pub struct BridgedChain;
+
+impl bp_runtime::Chain for BridgedChain {
+	type BlockNumber = u64;
+	type Hash = H256;
+	type Hasher = BlakeTwo256;
+	type Header = sp_runtime::generic::Header<u64, BlakeTwo256>;
+
+	type AccountId = BridgedAccountId;
+	type Balance = BridgedBalance;
+	type Index = u64;
+	type Signature = BridgedAccountSignature;
+}
+
+pub struct TestMessagesBridge;
+
+impl MessagesBridge<AccountId, Balance, MessagePayloadOf<TestRuntime, ()>> for TestMessagesBridge {
+	type Error = ();
+
+	fn send_message(
+		sender: frame_system::RawOrigin<AccountId>,
+		lane: LaneId,
+		message: MessagePayloadOf<TestRuntime, ()>,
+		delivery_and_dispatch_fee: Balance,
+	) -> Result<SendMessageArtifacts, Self::Error> {
+		assert_ne!(sender, frame_system::RawOrigin::Signed(THIS_CHAIN_ACCOUNT));
+		assert_eq!(lane, OutboundMessageLaneId::get());
+		assert_eq!(delivery_and_dispatch_fee, SWAP_DELIVERY_AND_DISPATCH_FEE);
+		match message.call[0] {
+			OK_TRANSFER_CALL => Ok(SendMessageArtifacts { nonce: MESSAGE_NONCE, weight: 0 }),
+			BAD_TRANSFER_CALL => Err(()),
+			_ => unreachable!(),
+		}
+	}
+}
+
+pub struct TestAccountConverter;
+
+impl sp_runtime::traits::Convert<H256, AccountId> for TestAccountConverter {
+	fn convert(hash: H256) -> AccountId {
+		hash.to_low_u64_ne()
+	}
+}
+
+/// Run pallet test.
+pub fn run_test<T>(test: impl FnOnce() -> T) -> T {
+	let mut t = frame_system::GenesisConfig::default().build_storage::<TestRuntime>().unwrap();
+	pallet_balances::GenesisConfig::<TestRuntime> {
+		balances: vec![(THIS_CHAIN_ACCOUNT, THIS_CHAIN_ACCOUNT_BALANCE)],
+	}
+	.assimilate_storage(&mut t)
+	.unwrap();
+	let mut ext = sp_io::TestExternalities::new(t);
+	ext.execute_with(test)
+}
diff --git a/polkadot/bridges/modules/token-swap/src/weights.rs b/polkadot/bridges/modules/token-swap/src/weights.rs
new file mode 100644
index 0000000000000000000000000000000000000000..06cb6b85cf336d4d308309c4aac45f6f2712b495
--- /dev/null
+++ b/polkadot/bridges/modules/token-swap/src/weights.rs
@@ -0,0 +1,93 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Autogenerated weights for `pallet_bridge_token_swap`
+//!
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
+//! DATE: 2021-10-06, STEPS: 50, REPEAT: 20
+//! LOW RANGE: [], HIGH RANGE: []
+//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled
+//! CHAIN: Some("dev"), DB CACHE: 128
+
+// Executed Command:
+// target/release/millau-bridge-node
+// benchmark
+// --chain=dev
+// --steps=50
+// --repeat=20
+// --pallet=pallet_bridge_token_swap
+// --extrinsic=*
+// --execution=wasm
+// --wasm-execution=Compiled
+// --heap-pages=4096
+// --output=./modules/token-swap/src/weights.rs
+// --template=./.maintain/millau-weight-template.hbs
+
+#![allow(clippy::all)]
+#![allow(unused_parens)]
+#![allow(unused_imports)]
+
+use frame_support::{
+	traits::Get,
+	weights::{constants::RocksDbWeight, Weight},
+};
+use sp_std::marker::PhantomData;
+
+/// Weight functions needed for `pallet_bridge_token_swap`.
+pub trait WeightInfo {
+	fn create_swap() -> Weight;
+	fn claim_swap() -> Weight;
+	fn cancel_swap() -> Weight;
+}
+
+/// Weights for `pallet_bridge_token_swap` using the Millau node and recommended hardware.
+pub struct MillauWeight<T>(PhantomData<T>);
+impl<T: frame_system::Config> WeightInfo for MillauWeight<T> {
+	fn create_swap() -> Weight {
+		(116_040_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(3 as Weight))
+			.saturating_add(T::DbWeight::get().writes(4 as Weight))
+	}
+	fn claim_swap() -> Weight {
+		(102_882_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(3 as Weight))
+			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+	}
+	fn cancel_swap() -> Weight {
+		(99_434_000 as Weight)
+			.saturating_add(T::DbWeight::get().reads(3 as Weight))
+			.saturating_add(T::DbWeight::get().writes(3 as Weight))
+	}
+}
+
+// For backwards compatibility and tests
+impl WeightInfo for () {
+	fn create_swap() -> Weight {
+		(116_040_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(4 as Weight))
+	}
+	fn claim_swap() -> Weight {
+		(102_882_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+	}
+	fn cancel_swap() -> Weight {
+		(99_434_000 as Weight)
+			.saturating_add(RocksDbWeight::get().reads(3 as Weight))
+			.saturating_add(RocksDbWeight::get().writes(3 as Weight))
+	}
+}
diff --git a/polkadot/bridges/modules/token-swap/src/weights_ext.rs b/polkadot/bridges/modules/token-swap/src/weights_ext.rs
new file mode 100644
index 0000000000000000000000000000000000000000..2d27c76cbe68564660b5b547ed2eb8aa2a882c7c
--- /dev/null
+++ b/polkadot/bridges/modules/token-swap/src/weights_ext.rs
@@ -0,0 +1,42 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Weight-related utilities.
+
+use crate::weights::WeightInfo;
+
+use bp_runtime::Size;
+use frame_support::weights::{RuntimeDbWeight, Weight};
+
+/// Extended weight info.
+pub trait WeightInfoExt: WeightInfo {
+	// Functions that are directly mapped to extrinsics weights.
+
+	/// Weight of message send extrinsic.
+	fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight;
+}
+
+impl WeightInfoExt for () {
+	fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight {
+		<() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(message, db_weight)
+	}
+}
+
+impl<T: frame_system::Config> WeightInfoExt for crate::weights::MillauWeight<T> {
+	fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight {
+		<() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(message, db_weight)
+	}
+}
diff --git a/polkadot/bridges/primitives/chain-kusama/Cargo.toml b/polkadot/bridges/primitives/chain-kusama/Cargo.toml
index 70ff3b844df07a295c098541f933c82d226cf542..6ff860357c7c451524b106be643d0bbe6e38ebb1 100644
--- a/polkadot/bridges/primitives/chain-kusama/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-kusama/Cargo.toml
@@ -7,15 +7,20 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
+smallvec = "1.7"
 
 # Bridge Dependencies
+
 bp-messages = { path = "../messages", default-features = false }
 bp-polkadot-core = { path = "../polkadot-core", default-features = false }
 bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
@@ -23,6 +28,8 @@ std = [
 	"bp-messages/std",
 	"bp-polkadot-core/std",
 	"bp-runtime/std",
+	"frame-support/std",
 	"sp-api/std",
 	"sp-std/std",
+	"sp-version/std",
 ]
diff --git a/polkadot/bridges/primitives/chain-kusama/src/lib.rs b/polkadot/bridges/primitives/chain-kusama/src/lib.rs
index e5ab47259e54c2fe31d6d0441fabf49460037f1a..9a6eb66d22865be745cf65c15e85a7ebd2252667 100644
--- a/polkadot/bridges/primitives/chain-kusama/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-kusama/src/lib.rs
@@ -21,13 +21,46 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
+use frame_support::weights::{
+	WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
+use sp_version::RuntimeVersion;
 
 pub use bp_polkadot_core::*;
 
 /// Kusama Chain
 pub type Kusama = PolkadotLike;
 
+// NOTE: This needs to be kept up to date with the Kusama runtime found in the Polkadot repo.
+pub const VERSION: RuntimeVersion = RuntimeVersion {
+	spec_name: sp_version::create_runtime_str!("kusama"),
+	impl_name: sp_version::create_runtime_str!("parity-kusama"),
+	authoring_version: 2,
+	spec_version: 9100,
+	impl_version: 0,
+	apis: sp_version::create_apis_vec![[]],
+	transaction_version: 5,
+};
+
+// NOTE: This needs to be kept up to date with the Kusama runtime found in the Polkadot repo.
+pub struct WeightToFee;
+impl WeightToFeePolynomial for WeightToFee {
+	type Balance = Balance;
+	fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
+		const CENTS: Balance = 1_000_000_000_000 / 30_000;
+		// in Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT:
+		let p = CENTS;
+		let q = 10 * Balance::from(ExtrinsicBaseWeight::get());
+		smallvec::smallvec![WeightToFeeCoefficient {
+			degree: 1,
+			negative: false,
+			coeff_frac: Perbill::from_rational(p % q, q),
+			coeff_integer: p / q,
+		}]
+	}
+}
+
 // We use this to get the account on Kusama (target) which is derived from Polkadot's (source)
 // account.
 pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
@@ -35,27 +68,53 @@ pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount<AccountId>)
 	AccountIdConverter::convert(encoded_id)
 }
 
+/// Per-byte fee for Kusama transactions.
+pub const TRANSACTION_BYTE_FEE: Balance = 10 * 1_000_000_000_000 / 30_000 / 1_000;
+
+/// Existential deposit on Kusama.
+pub const EXISTENTIAL_DEPOSIT: Balance = 1_000_000_000_000 / 30_000;
+
+/// The target length of a session (how often authorities change) on Kusama measured in number of
+/// blocks.
+///
+/// Note that since this is a target, sessions may change before/after this time depending on network
+/// conditions.
+pub const SESSION_LENGTH: BlockNumber = time_units::HOURS;
+
+/// Name of the With-Polkadot messages pallet instance in the Kusama runtime.
+pub const WITH_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotMessages";
+
+/// Name of the DOT->KSM conversion rate stored in the Kusama runtime.
+pub const POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME: &str =
+	"PolkadotToKusamaConversionRate";
+
 /// Name of the `KusamaFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_best_finalized";
 /// Name of the `KusamaFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_is_known_header";
 
-/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToKusamaOutboundLaneApi::message_details` runtime method.
 pub const TO_KUSAMA_MESSAGE_DETAILS_METHOD: &str = "ToKusamaOutboundLaneApi_message_details";
 /// Name of the `ToKusamaOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_generated_nonce";
+pub const TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToKusamaOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToKusamaOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "ToKusamaOutboundLaneApi_latest_received_nonce";
+pub const TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToKusamaOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromKusamaInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_received_nonce";
+pub const FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromKusamaInboundLaneApi_latest_received_nonce";
+/// Name of the `FromKusamaInboundLaneApi::latest_confirmed_nonce` runtime method.
-pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromKusamaInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromKusamaInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromKusamaInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str = "FromKusamaInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_KUSAMA_UNREWARDED_RELAYERS_STATE: &str =
+	"FromKusamaInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Kusama headers.
diff --git a/polkadot/bridges/primitives/chain-millau/Cargo.toml b/polkadot/bridges/primitives/chain-millau/Cargo.toml
index 3628f9092091ab163e8b88f1612422f857068332..f1e17fe96f5ac713214c6d730aae24de87c9c907 100644
--- a/polkadot/bridges/primitives/chain-millau/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-millau/Cargo.toml
@@ -16,19 +16,20 @@ fixed-hash = { version = "0.7.0", default-features = false }
 hash256-std-hasher = { version = "0.15.2", default-features = false }
 impl-codec = { version = "0.5.1", default-features = false }
 impl-serde = { version = "0.3.1", optional = true }
-parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
+parity-util-mem = { version = "0.10", default-features = false, features = ["primitive-types"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+serde = { version = "1.0", optional = true, features = ["derive"] }
 
 # Substrate Based Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
@@ -42,6 +43,7 @@ std = [
 	"impl-codec/std",
 	"impl-serde",
 	"parity-util-mem/std",
+	"scale-info/std",
 	"serde",
 	"sp-api/std",
 	"sp-core/std",
diff --git a/polkadot/bridges/primitives/chain-millau/src/lib.rs b/polkadot/bridges/primitives/chain-millau/src/lib.rs
index 0efc54e96e6ad8532f99e067e75f3ed45bb5bef4..0092f7092bc0d309c2d2af0cf425633338f586e6 100644
--- a/polkadot/bridges/primitives/chain-millau/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-millau/src/lib.rs
@@ -25,14 +25,14 @@ mod millau_hash;
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
 use bp_runtime::Chain;
 use frame_support::{
-	weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight},
+	weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight},
 	Parameter, RuntimeDebug,
 };
 use frame_system::limits;
+use scale_info::TypeInfo;
 use sp_core::Hasher as HasherT;
-use sp_runtime::traits::Convert;
 use sp_runtime::{
-	traits::{IdentifyAccount, Verify},
+	traits::{Convert, IdentifyAccount, Verify},
 	MultiSignature, MultiSigner, Perbill,
 };
 use sp_std::prelude::*;
@@ -77,29 +77,32 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 1024;
 /// Weight of single regular message delivery transaction on Millau chain.
 ///
 /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call
-/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
-/// The message must have dispatch weight set to zero. The result then must be rounded up to account
-/// possible future runtime upgrades.
+/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`
+/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be
+/// rounded up to account for possible future runtime upgrades.
 pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
 
 /// Increase of delivery transaction weight on Millau chain with every additional message byte.
 ///
-/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The
-/// result then must be rounded up to account possible future runtime upgrades.
+/// This value is a result of
+/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then
+/// must be rounded up to account possible future runtime upgrades.
 pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
 
 /// Maximal weight of single message delivery confirmation transaction on Millau chain.
 ///
-/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation
-/// for the case when single message is confirmed. The result then must be rounded up to account possible future
-/// runtime upgrades.
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof`
+/// weight formula computation for the case when single message is confirmed. The result then must
+/// be rounded up to account for possible future runtime upgrades.
 pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
 
 /// Weight of pay-dispatch-fee operation for inbound messages at Millau chain.
 ///
-/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
-/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
-/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
 pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 /// The target length of a session (how often authorities change) on Millau measured in of number of
@@ -130,7 +133,7 @@ pub type BlockNumber = u64;
 /// Hash type used in Millau.
 pub type Hash = <BlakeTwoAndKeccak256 as HasherT>::Out;
 
-/// The type of an object that can produce hashes on Millau.
+/// Type of object that can produce hashes on Millau.
 pub type Hasher = BlakeTwoAndKeccak256;
 
 /// The header type used by Millau.
@@ -149,6 +152,12 @@ pub type AccountSigner = MultiSigner;
 /// Balance of an account.
 pub type Balance = u64;
 
+/// Index of a transaction in the chain.
+pub type Index = u32;
+
+/// Weight-to-Fee type used by Millau.
+pub type WeightToFee = IdentityFee<Balance>;
+
 /// Millau chain.
 #[derive(RuntimeDebug)]
 pub struct Millau;
@@ -158,10 +167,15 @@ impl Chain for Millau {
 	type Hash = Hash;
 	type Hasher = Hasher;
 	type Header = Header;
+
+	type AccountId = AccountId;
+	type Balance = Balance;
+	type Index = Index;
+	type Signature = Signature;
 }
 
 /// Millau Hasher (Blake2-256 ++ Keccak-256) implementation.
-#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug)]
+#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 pub struct BlakeTwoAndKeccak256;
 
@@ -245,25 +259,36 @@ pub fn max_extrinsic_size() -> u32 {
 	*BlockLength::get().max.get(DispatchClass::Normal)
 }
 
+/// Name of the With-Rialto messages pallet instance in the Millau runtime.
+pub const WITH_RIALTO_MESSAGES_PALLET_NAME: &str = "BridgeRialtoMessages";
+/// Name of the With-Rialto token swap pallet instance in the Millau runtime.
+pub const WITH_RIALTO_TOKEN_SWAP_PALLET_NAME: &str = "BridgeRialtoTokenSwap";
+
 /// Name of the `MillauFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_finalized";
 
-/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToMillauOutboundLaneApi::message_details` runtime method.
 pub const TO_MILLAU_MESSAGE_DETAILS_METHOD: &str = "ToMillauOutboundLaneApi_message_details";
 /// Name of the `ToMillauOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_received_nonce";
+pub const TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToMillauOutboundLaneApi_latest_received_nonce";
 /// Name of the `ToMillauOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str = "ToMillauOutboundLaneApi_latest_generated_nonce";
+pub const TO_MILLAU_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToMillauOutboundLaneApi_latest_generated_nonce";
 
 /// Name of the `FromMillauInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_received_nonce";
+pub const FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromMillauInboundLaneApi_latest_received_nonce";
 /// Name of the `FromMillauInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromMillauInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromMillauInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromMillauInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str = "FromMillauInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_MILLAU_UNREWARDED_RELAYERS_STATE: &str =
+	"FromMillauInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Millau headers.
@@ -287,7 +312,7 @@ sp_api::decl_runtime_apis! {
 		///
 		/// Returns `None` if message is too expensive to be sent to Millau from this chain.
 		///
-		/// Please keep in mind that this method returns lowest message fee required for message
+		/// Please keep in mind that this method returns the lowest message fee required for message
 		/// to be accepted to the lane. It may be good idea to pay a bit over this price to account
 		/// future exchange rate changes and guarantee that relayer would deliver your message
 		/// to the target chain.
@@ -318,7 +343,7 @@ sp_api::decl_runtime_apis! {
 	pub trait FromMillauInboundLaneApi {
 		/// Returns nonce of the latest message, received by given lane.
 		fn latest_received_nonce(lane: LaneId) -> MessageNonce;
-		/// Nonce of latest message that has been confirmed to the bridged chain.
+		/// Nonce of the latest message that has been confirmed to the bridged chain.
 		fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce;
 		/// State of the unrewarded relayers set at given lane.
 		fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState;
diff --git a/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs
index 936791217af1829cccb42a0a2a2661ea3fe759ea..11968b2f2826701ad0bf46f0fd90870df55e997b 100644
--- a/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs
+++ b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs
@@ -15,6 +15,7 @@
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
 use parity_util_mem::MallocSizeOf;
+use scale_info::TypeInfo;
 use sp_runtime::traits::CheckEqual;
 
 // `sp_core::H512` can't be used, because it doesn't implement `CheckEqual`, which is required
@@ -22,7 +23,7 @@ use sp_runtime::traits::CheckEqual;
 
 fixed_hash::construct_fixed_hash! {
 	/// Hash type used in Millau chain.
-	#[derive(MallocSizeOf)]
+	#[derive(MallocSizeOf, TypeInfo)]
 	pub struct MillauHash(64);
 }
 
diff --git a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml
index 22ded41b9145ca690f423939d7bdc611ecc48c55..917c7f97478390864791e907d9d5bcc8cceb8321 100644
--- a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml
@@ -7,16 +7,20 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
+smallvec = "1.7"
 
 # Bridge Dependencies
+
 bp-messages = { path = "../messages", default-features = false }
 bp-polkadot-core = { path = "../polkadot-core", default-features = false }
 bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
 
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
@@ -24,6 +28,8 @@ std = [
 	"bp-messages/std",
 	"bp-polkadot-core/std",
 	"bp-runtime/std",
+	"frame-support/std",
 	"sp-api/std",
 	"sp-std/std",
+	"sp-version/std",
 ]
diff --git a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs
index b0ba77c66ffc34cc7dbb9fd5534832e74cce5c23..26bad1ea8656d1e441b18a6712ca99c55e8a3e97 100644
--- a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs
@@ -21,13 +21,46 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
+use frame_support::weights::{
+	WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
+use sp_version::RuntimeVersion;
 
 pub use bp_polkadot_core::*;
 
 /// Polkadot Chain
 pub type Polkadot = PolkadotLike;
 
+// NOTE: This needs to be kept up to date with the Polkadot runtime found in the Polkadot repo.
+pub const VERSION: RuntimeVersion = RuntimeVersion {
+	spec_name: sp_version::create_runtime_str!("polkadot"),
+	impl_name: sp_version::create_runtime_str!("parity-polkadot"),
+	authoring_version: 0,
+	spec_version: 9100,
+	impl_version: 0,
+	apis: sp_version::create_apis_vec![[]],
+	transaction_version: 7,
+};
+
+// NOTE: This needs to be kept up to date with the Polkadot runtime found in the Polkadot repo.
+pub struct WeightToFee;
+impl WeightToFeePolynomial for WeightToFee {
+	type Balance = Balance;
+	fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
+		const CENTS: Balance = 10_000_000_000 / 100;
+		// in Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT:
+		let p = CENTS;
+		let q = 10 * Balance::from(ExtrinsicBaseWeight::get());
+		smallvec::smallvec![WeightToFeeCoefficient {
+			degree: 1,
+			negative: false,
+			coeff_frac: Perbill::from_rational(p % q, q),
+			coeff_integer: p / q,
+		}]
+	}
+}
+
 // We use this to get the account on Polkadot (target) which is derived from Kusama's (source)
 // account.
 pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount<AccountId>) -> AccountId {
@@ -35,27 +68,53 @@ pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount<AccountId>) -
 	AccountIdConverter::convert(encoded_id)
 }
 
+/// Per-byte fee for Polkadot transactions.
+pub const TRANSACTION_BYTE_FEE: Balance = 10 * 10_000_000_000 / 100 / 1_000;
+
+/// Existential deposit on Polkadot.
+pub const EXISTENTIAL_DEPOSIT: Balance = 10_000_000_000;
+
+/// The target length of a session (how often authorities change) on Polkadot measured in of number
+/// of blocks.
+///
+/// Note that since this is a target sessions may change before/after this time depending on network
+/// conditions.
+pub const SESSION_LENGTH: BlockNumber = 4 * time_units::HOURS;
+
+/// Name of the With-Kusama messages pallet instance in the Polkadot runtime.
+pub const WITH_KUSAMA_MESSAGES_PALLET_NAME: &str = "BridgeKusamaMessages";
+
+/// Name of the KSM->DOT conversion rate stored in the Polkadot runtime.
+pub const KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME: &str =
+	"KusamaToPolkadotConversionRate";
+
 /// Name of the `PolkadotFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_best_finalized";
 /// Name of the `PolkadotFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_is_known_header";
 
-/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToPolkadotOutboundLaneApi::message_details` runtime method.
 pub const TO_POLKADOT_MESSAGE_DETAILS_METHOD: &str = "ToPolkadotOutboundLaneApi_message_details";
 /// Name of the `ToPolkadotOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_generated_nonce";
+pub const TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToPolkadotOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToPolkadotOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "ToPolkadotOutboundLaneApi_latest_received_nonce";
+pub const TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToPolkadotOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromPolkadotInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_received_nonce";
+pub const FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromPolkadotInboundLaneApi_latest_received_nonce";
 /// Name of the `FromPolkadotInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromPolkadotInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromPolkadotInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromPolkadotInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str = "FromPolkadotInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_POLKADOT_UNREWARDED_RELAYERS_STATE: &str =
+	"FromPolkadotInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Polkadot headers.
diff --git a/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml b/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..034188631b8cde608025ee64baa5b6de1b9be698
--- /dev/null
+++ b/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+name = "bp-rialto-parachain"
+description = "Primitives of Rialto parachain runtime."
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[dependencies]
+
+# Bridge Dependencies
+
+bp-messages = { path = "../messages", default-features = false }
+bp-runtime = { path = "../runtime", default-features = false }
+
+# Substrate Based Dependencies
+
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+[features]
+default = ["std"]
+std = [
+	"bp-messages/std",
+	"bp-runtime/std",
+	"frame-support/std",
+	"frame-system/std",
+	"sp-api/std",
+	"sp-core/std",
+	"sp-runtime/std",
+	"sp-std/std",
+]
diff --git a/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs b/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..826f6d39bd7f73b9d04578d34715c25e5cc530af
--- /dev/null
+++ b/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs
@@ -0,0 +1,128 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+// RuntimeApi generated functions
+#![allow(clippy::too_many_arguments)]
+// Runtime-generated DecodeLimit::decode_all_With_depth_limit
+#![allow(clippy::unnecessary_mut_passed)]
+
+use bp_runtime::Chain;
+use frame_support::{
+	weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight},
+	RuntimeDebug,
+};
+use frame_system::limits;
+use sp_core::Hasher as HasherT;
+use sp_runtime::{
+	traits::{BlakeTwo256, IdentifyAccount, Verify},
+	MultiSignature, MultiSigner, Perbill,
+};
+
+/// Maximal weight of single Rialto parachain block.
+///
+/// This represents two seconds of compute assuming a target block time of six seconds.
+pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND;
+
+/// Represents the average portion of a block's weight that will be used by an
+/// `on_initialize()` runtime call.
+pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10);
+
+/// Represents the portion of a block that will be used by Normal extrinsics.
+pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
+
+/// Block number type used in Rialto.
+pub type BlockNumber = u32;
+
+/// Hash type used in Rialto.
+pub type Hash = <BlakeTwo256 as HasherT>::Out;
+
+/// The type of object that can produce hashes on Rialto.
+pub type Hasher = BlakeTwo256;
+
+/// The header type used by Rialto.
+pub type Header = sp_runtime::generic::Header<BlockNumber, Hasher>;
+
+/// Alias to 512-bit hash when used in the context of a transaction signature on the chain.
+pub type Signature = MultiSignature;
+
+/// Some way of identifying an account on the chain. We intentionally make it equivalent
+/// to the public key of our transaction signing scheme.
+pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
+
+/// Public key of the chain account that may be used to verify signatures.
+pub type AccountSigner = MultiSigner;
+
+/// Balance of an account.
+pub type Balance = u128;
+
+/// An instant or duration in time.
+pub type Moment = u64;
+
+/// Index of a transaction in the parachain.
+pub type Index = u32;
+
+/// Weight-to-Fee type used by Rialto parachain.
+pub type WeightToFee = IdentityFee<Balance>;
+
+/// Rialto parachain.
+#[derive(RuntimeDebug)]
+pub struct RialtoParachain;
+
+impl Chain for RialtoParachain {
+	type BlockNumber = BlockNumber;
+	type Hash = Hash;
+	type Hasher = Hasher;
+	type Header = Header;
+
+	type AccountId = AccountId;
+	type Balance = Balance;
+	type Index = Index;
+	type Signature = Signature;
+}
+
+frame_support::parameter_types! {
+	pub BlockLength: limits::BlockLength =
+		limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO);
+	pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder()
+		// Allowance for Normal class
+		.for_class(DispatchClass::Normal, |weights| {
+			weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
+		})
+		// Allowance for Operational class
+		.for_class(DispatchClass::Operational, |weights| {
+			weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT);
+			// Extra reserved space for Operational class
+			weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT);
+		})
+		// By default Mandatory class is not limited at all.
+		// This parameter is used to derive maximal size of a single extrinsic.
+		.avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO)
+		.build_or_panic();
+}
+
+/// Get the maximum weight (compute time) that a Normal extrinsic on the Millau chain can use.
+pub fn max_extrinsic_weight() -> Weight {
+	BlockWeights::get()
+		.get(DispatchClass::Normal)
+		.max_extrinsic
+		.unwrap_or(Weight::MAX)
+}
+
+/// Get the maximum length in bytes that a Normal extrinsic on the Millau chain requires.
+pub fn max_extrinsic_size() -> u32 {
+	*BlockLength::get().max.get(DispatchClass::Normal)
+}
diff --git a/polkadot/bridges/primitives/chain-rialto/Cargo.toml b/polkadot/bridges/primitives/chain-rialto/Cargo.toml
index 7e039a40acd964c9036c8fd3dd81e40baeb431de..d16ac59484fb5da33c5f0d23d6eeadbe5a04bee0 100644
--- a/polkadot/bridges/primitives/chain-rialto/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-rialto/Cargo.toml
@@ -15,12 +15,12 @@ bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/polkadot/bridges/primitives/chain-rialto/src/lib.rs b/polkadot/bridges/primitives/chain-rialto/src/lib.rs
index 8139372959e357c2b4df5232f606aad88e903acf..6c4e48301e3bbd6abbadffb0985245d2d605e84d 100644
--- a/polkadot/bridges/primitives/chain-rialto/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-rialto/src/lib.rs
@@ -23,7 +23,7 @@
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
 use bp_runtime::Chain;
 use frame_support::{
-	weights::{constants::WEIGHT_PER_SECOND, DispatchClass, Weight},
+	weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight},
 	Parameter, RuntimeDebug,
 };
 use frame_system::limits;
@@ -42,7 +42,7 @@ pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;
 /// Number of bytes, included in the signed Rialto transaction apart from the encoded call itself.
 ///
 /// Can be computed by subtracting encoded call size from raw transaction size.
-pub const TX_EXTRA_BYTES: u32 = 103;
+pub const TX_EXTRA_BYTES: u32 = 104;
 
 /// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id.
 pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32;
@@ -68,29 +68,32 @@ pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 128;
 /// Weight of single regular message delivery transaction on Rialto chain.
 ///
 /// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call
-/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered.
-/// The message must have dispatch weight set to zero. The result then must be rounded up to account
-/// possible future runtime upgrades.
+/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`
+/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be
+/// rounded up to account possible future runtime upgrades.
 pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
 
 /// Increase of delivery transaction weight on Rialto chain with every additional message byte.
 ///
-/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The
-/// result then must be rounded up to account possible future runtime upgrades.
+/// This value is a result of
+/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then
+/// must be rounded up to account possible future runtime upgrades.
 pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
 
 /// Maximal weight of single message delivery confirmation transaction on Rialto chain.
 ///
-/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula computation
-/// for the case when single message is confirmed. The result then must be rounded up to account possible future
-/// runtime upgrades.
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof`
+/// weight formula computation for the case when single message is confirmed. The result then must
+/// be rounded up to account possible future runtime upgrades.
 pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
 
 /// Weight of pay-dispatch-fee operation for inbound messages at Rialto chain.
 ///
-/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()`
-/// call for your chain. Don't put too much reserve there, because it is used to **decrease**
-/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper.
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
 pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 /// The target length of a session (how often authorities change) on Rialto measured in of number of
@@ -105,7 +108,7 @@ pub use time_units::*;
 
 /// Human readable time units defined in terms of number of blocks.
 pub mod time_units {
-	use super::BlockNumber;
+	use super::{BlockNumber, SESSION_LENGTH};
 
 	pub const MILLISECS_PER_BLOCK: u64 = 6000;
 	pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
@@ -113,6 +116,11 @@ pub mod time_units {
 	pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber);
 	pub const HOURS: BlockNumber = MINUTES * 60;
 	pub const DAYS: BlockNumber = HOURS * 24;
+
+	pub const EPOCH_DURATION_IN_SLOTS: BlockNumber = SESSION_LENGTH;
+
+	// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks.
+	pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
 }
 
 /// Block number type used in Rialto.
@@ -121,7 +129,7 @@ pub type BlockNumber = u32;
 /// Hash type used in Rialto.
 pub type Hash = <BlakeTwo256 as HasherT>::Out;
 
-/// The type of an object that can produce hashes on Rialto.
+/// The type of object that can produce hashes on Rialto.
 pub type Hasher = BlakeTwo256;
 
 /// The header type used by Rialto.
@@ -140,6 +148,15 @@ pub type AccountSigner = MultiSigner;
 /// Balance of an account.
 pub type Balance = u128;
 
+/// An instant or duration in time.
+pub type Moment = u64;
+
+/// Index of a transaction in the chain.
+pub type Index = u32;
+
+/// Weight-to-Fee type used by Rialto.
+pub type WeightToFee = IdentityFee<Balance>;
+
 /// Rialto chain.
 #[derive(RuntimeDebug)]
 pub struct Rialto;
@@ -149,6 +166,11 @@ impl Chain for Rialto {
 	type Hash = Hash;
 	type Hasher = Hasher;
 	type Header = Header;
+
+	type AccountId = AccountId;
+	type Balance = Balance;
+	type Index = Index;
+	type Signature = Signature;
 }
 
 /// Convert a 256-bit hash into an AccountId.
@@ -206,25 +228,40 @@ pub fn max_extrinsic_size() -> u32 {
 	*BlockLength::get().max.get(DispatchClass::Normal)
 }
 
+/// Name of the With-Millau messages pallet instance in the Rialto runtime.
+pub const WITH_MILLAU_MESSAGES_PALLET_NAME: &str = "BridgeMillauMessages";
+
+/// Name of the parachain registrar pallet in the Rialto runtime.
+pub const PARAS_REGISTRAR_PALLET_NAME: &str = "Registrar";
+
+/// Name of the parachains pallet in the Rialto runtime.
+pub const PARAS_PALLET_NAME: &str = "Paras";
+
 /// Name of the `RialtoFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_finalized";
 
-/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToRialtoOutboundLaneApi::message_details` runtime method.
 pub const TO_RIALTO_MESSAGE_DETAILS_METHOD: &str = "ToRialtoOutboundLaneApi_message_details";
 /// Name of the `ToRialtoOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_generated_nonce";
+pub const TO_RIALTO_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToRialtoOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToRialtoOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRialtoOutboundLaneApi_latest_received_nonce";
+pub const TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToRialtoOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromRialtoInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_received_nonce";
+pub const FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromRialtoInboundLaneApi_latest_received_nonce";
 /// Name of the `FromRialtoInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRialtoInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromRialtoInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromRialtoInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str = "FromRialtoInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_RIALTO_UNREWARDED_RELAYERS_STATE: &str =
+	"FromRialtoInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Rialto headers.
@@ -248,7 +285,7 @@ sp_api::decl_runtime_apis! {
 		///
 		/// Returns `None` if message is too expensive to be sent to Rialto from this chain.
 		///
-		/// Please keep in mind that this method returns lowest message fee required for message
+		/// Please keep in mind that this method returns the lowest message fee required for message
 		/// to be accepted to the lane. It may be good idea to pay a bit over this price to account
 		/// future exchange rate changes and guarantee that relayer would deliver your message
 		/// to the target chain.
@@ -279,7 +316,7 @@ sp_api::decl_runtime_apis! {
 	pub trait FromRialtoInboundLaneApi {
 		/// Returns nonce of the latest message, received by given lane.
 		fn latest_received_nonce(lane: LaneId) -> MessageNonce;
-		/// Nonce of latest message that has been confirmed to the bridged chain.
+		/// Nonce of the latest message that has been confirmed to the bridged chain.
 		fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce;
 		/// State of the unrewarded relayers set at given lane.
 		fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState;
diff --git a/polkadot/bridges/primitives/chain-rococo/Cargo.toml b/polkadot/bridges/primitives/chain-rococo/Cargo.toml
index 33772c7890a07c495296ff683d6a1b8baf7f7fcc..6e1189b05f3635d9b4fbd8c0e965ea35b2fda3f8 100644
--- a/polkadot/bridges/primitives/chain-rococo/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-rococo/Cargo.toml
@@ -7,8 +7,8 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
-smallvec = "1.6"
+parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] }
+smallvec = "1.7"
 
 # Bridge Dependencies
 bp-messages = { path = "../messages", default-features = false }
@@ -16,8 +16,8 @@ bp-polkadot-core = { path = "../polkadot-core", default-features = false }
 bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
diff --git a/polkadot/bridges/primitives/chain-rococo/src/lib.rs b/polkadot/bridges/primitives/chain-rococo/src/lib.rs
index b4faae00eeb31c84d17602088688402a55708b37..b3bbc91976dac1c796be3fe91c9d8f2a36e9b3dd 100644
--- a/polkadot/bridges/primitives/chain-rococo/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-rococo/src/lib.rs
@@ -21,7 +21,9 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
-use frame_support::weights::{WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial};
+use frame_support::weights::{
+	Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
 use sp_std::prelude::*;
 use sp_version::RuntimeVersion;
 
@@ -30,8 +32,8 @@ pub use bp_polkadot_core::*;
 /// Rococo Chain
 pub type Rococo = PolkadotLike;
 
-/// The target length of a session (how often authorities change) on Westend measured in of number of
-/// blocks.
+/// The target length of a session (how often authorities change) on Westend measured in of number
+/// of blocks.
 ///
 /// Note that since this is a target sessions may change before/after this time depending on network
 /// conditions.
@@ -72,27 +74,45 @@ pub fn derive_account_from_wococo_id(id: bp_runtime::SourceAccount<AccountId>) -
 	AccountIdConverter::convert(encoded_id)
 }
 
+/// Name of the With-Wococo messages pallet instance in the Rococo runtime.
+pub const WITH_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages";
+
 /// Name of the `RococoFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_finalized";
 /// Name of the `RococoFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_is_known_header";
 
-/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToRococoOutboundLaneApi::message_details` runtime method.
 pub const TO_ROCOCO_MESSAGE_DETAILS_METHOD: &str = "ToRococoOutboundLaneApi_message_details";
 /// Name of the `ToRococoOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_generated_nonce";
+pub const TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToRococoOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToRococoOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToRococoOutboundLaneApi_latest_received_nonce";
+pub const TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToRococoOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromRococoInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_received_nonce";
+pub const FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromRococoInboundLaneApi_latest_received_nonce";
 /// Name of the `FromRococoInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromRococoInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromRococoInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromRococoInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromRococoInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_ROCOCO_UNREWARDED_RELAYERS_STATE: &str =
+	"FromRococoInboundLaneApi_unrewarded_relayers_state";
+
+/// Weight of pay-dispatch-fee operation for inbound messages at Rococo chain.
+///
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery
+/// transactions cheaper.
+pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Rococo headers.
diff --git a/polkadot/bridges/primitives/chain-westend/Cargo.toml b/polkadot/bridges/primitives/chain-westend/Cargo.toml
index d5fda1ccef05a1c5ee8b9ffa2cf32cd107b43d98..4fd1652744ed6473690016bcf9b812ae29505c85 100644
--- a/polkadot/bridges/primitives/chain-westend/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-westend/Cargo.toml
@@ -7,16 +7,21 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+smallvec = "1.7"
 
 # Bridge Dependencies
+
 bp-header-chain = { path = "../header-chain", default-features = false }
 bp-messages = { path = "../messages", default-features = false }
 bp-polkadot-core = { path = "../polkadot-core", default-features = false }
 bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
@@ -28,7 +33,9 @@ std = [
 	"bp-messages/std",
 	"bp-polkadot-core/std",
 	"bp-runtime/std",
+	"frame-support/std",
 	"parity-scale-codec/std",
+	"scale-info/std",
 	"sp-api/std",
 	"sp-runtime/std",
 	"sp-std/std",
diff --git a/polkadot/bridges/primitives/chain-westend/src/lib.rs b/polkadot/bridges/primitives/chain-westend/src/lib.rs
index e3c4d733def9db8fedd4b824170c899e5b7867d0..8beb897f59a15b8b22deeb88fd6099147aab98a9 100644
--- a/polkadot/bridges/primitives/chain-westend/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-westend/src/lib.rs
@@ -21,7 +21,10 @@
 #![allow(clippy::unnecessary_mut_passed)]
 
 use bp_messages::{LaneId, MessageDetails, MessageNonce, UnrewardedRelayersState};
-use bp_runtime::Chain;
+use frame_support::weights::{
+	WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial,
+};
+use scale_info::TypeInfo;
 use sp_std::prelude::*;
 use sp_version::RuntimeVersion;
 
@@ -30,7 +33,23 @@ pub use bp_polkadot_core::*;
 /// Westend Chain
 pub type Westend = PolkadotLike;
 
-pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
+// NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo.
+pub struct WeightToFee;
+impl WeightToFeePolynomial for WeightToFee {
+	type Balance = Balance;
+	fn polynomial() -> WeightToFeeCoefficients<Self::Balance> {
+		const CENTS: Balance = 1_000_000_000_000 / 1_000;
+		// in Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT:
+		let p = CENTS;
+		let q = 10 * Balance::from(ExtrinsicBaseWeight::get());
+		smallvec::smallvec![WeightToFeeCoefficient {
+			degree: 1,
+			negative: false,
+			coeff_frac: Perbill::from_rational(p % q, q),
+			coeff_integer: p / q,
+		}]
+	}
+}
 
 // NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo.
 pub const VERSION: RuntimeVersion = RuntimeVersion {
@@ -45,32 +64,11 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
 
 /// Westend Runtime `Call` enum.
 ///
-/// The enum represents a subset of possible `Call`s we can send to Westend chain.
-/// Ideally this code would be auto-generated from Metadata, because we want to
-/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
-///
-/// All entries here (like pretty much in the entire file) must be kept in sync with Westend
-/// `construct_runtime`, so that we maintain SCALE-compatibility.
-///
-/// See: https://github.com/paritytech/polkadot/blob/master/runtime/westend/src/lib.rs
-#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)]
-pub enum Call {
-	/// Rococo bridge pallet.
-	#[codec(index = 40)]
-	BridgeGrandpaRococo(BridgeGrandpaRococoCall),
-}
-
-#[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone)]
-#[allow(non_camel_case_types)]
-pub enum BridgeGrandpaRococoCall {
-	#[codec(index = 0)]
-	submit_finality_proof(
-		<PolkadotLike as Chain>::Header,
-		bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
-	),
-	#[codec(index = 1)]
-	initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
-}
+/// We are not currently submitting any Westend transactions => it is empty.
+#[derive(
+	parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone, TypeInfo,
+)]
+pub enum Call {}
 
 impl sp_runtime::traits::Dispatchable for Call {
 	type Origin = ();
@@ -95,25 +93,31 @@ pub const BEST_FINALIZED_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_best_
 /// Name of the `WestendFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_is_known_header";
 
-/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToWestendOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_WESTEND_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToWestendOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToWestendOutboundLaneApi::message_details` runtime method.
 pub const TO_WESTEND_MESSAGE_DETAILS_METHOD: &str = "ToWestendOutboundLaneApi_message_details";
 /// Name of the `ToWestendOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_generated_nonce";
+pub const TO_WESTEND_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToWestendOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToWestendOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWestendOutboundLaneApi_latest_received_nonce";
+pub const TO_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToWestendOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromWestendInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_received_nonce";
+pub const FROM_WESTEND_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromWestendInboundLaneApi_latest_received_nonce";
 /// Name of the `FromWestendInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWestendInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_WESTEND_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromWestendInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromWestendInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str = "FromWestendInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_WESTEND_UNREWARDED_RELAYERS_STATE: &str =
+	"FromWestendInboundLaneApi_unrewarded_relayers_state";
 
-/// The target length of a session (how often authorities change) on Westend measured in of number of
-/// blocks.
+/// The target length of a session (how often authorities change) on Westend measured in number
+/// of blocks.
 ///
 /// Note that since this is a target sessions may change before/after this time depending on network
 /// conditions.
diff --git a/polkadot/bridges/primitives/chain-wococo/Cargo.toml b/polkadot/bridges/primitives/chain-wococo/Cargo.toml
index 88201dde9ac191f392613de93ec999935568d3ac..d99783695ad313828e2dd4c4f259233c39b99ed5 100644
--- a/polkadot/bridges/primitives/chain-wococo/Cargo.toml
+++ b/polkadot/bridges/primitives/chain-wococo/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] }
 
 # Bridge Dependencies
 bp-messages = { path = "../messages", default-features = false }
@@ -16,7 +16,7 @@ bp-rococo = { path = "../chain-rococo", default-features = false }
 bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
diff --git a/polkadot/bridges/primitives/chain-wococo/src/lib.rs b/polkadot/bridges/primitives/chain-wococo/src/lib.rs
index 24572e141b20fb001179d676131cf1eb5089fb7d..fe2ce3a309a6bf6ab6ab16bf2390980472a8511b 100644
--- a/polkadot/bridges/primitives/chain-wococo/src/lib.rs
+++ b/polkadot/bridges/primitives/chain-wococo/src/lib.rs
@@ -25,7 +25,7 @@ use sp_std::prelude::*;
 
 pub use bp_polkadot_core::*;
 // Rococo runtime = Wococo runtime
-pub use bp_rococo::{WeightToFee, SESSION_LENGTH, VERSION};
+pub use bp_rococo::{WeightToFee, PAY_INBOUND_DISPATCH_FEE_WEIGHT, SESSION_LENGTH, VERSION};
 
 /// Wococo Chain
 pub type Wococo = PolkadotLike;
@@ -37,27 +37,36 @@ pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount<AccountId>) -
 	AccountIdConverter::convert(encoded_id)
 }
 
+/// Name of the With-Rococo messages pallet instance in the Wococo runtime.
+pub const WITH_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessages";
+
 /// Name of the `WococoFinalityApi::best_finalized` runtime method.
 pub const BEST_FINALIZED_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_best_finalized";
 /// Name of the `WococoFinalityApi::is_known_header` runtime method.
 pub const IS_KNOWN_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_is_known_header";
 
-/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime method.
+/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime
+/// method.
 pub const TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str =
 	"ToWococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee";
 /// Name of the `ToWococoOutboundLaneApi::message_details` runtime method.
 pub const TO_WOCOCO_MESSAGE_DETAILS_METHOD: &str = "ToWococoOutboundLaneApi_message_details";
 /// Name of the `ToWococoOutboundLaneApi::latest_generated_nonce` runtime method.
-pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_generated_nonce";
+pub const TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD: &str =
+	"ToWococoOutboundLaneApi_latest_generated_nonce";
 /// Name of the `ToWococoOutboundLaneApi::latest_received_nonce` runtime method.
-pub const TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "ToWococoOutboundLaneApi_latest_received_nonce";
+pub const TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"ToWococoOutboundLaneApi_latest_received_nonce";
 
 /// Name of the `FromWococoInboundLaneApi::latest_received_nonce` runtime method.
-pub const FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str = "FromWococoInboundLaneApi_latest_received_nonce";
+pub const FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD: &str =
+	"FromWococoInboundLaneApi_latest_received_nonce";
 /// Name of the `FromWococoInboundLaneApi::latest_onfirmed_nonce` runtime method.
-pub const FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str = "FromWococoInboundLaneApi_latest_confirmed_nonce";
+pub const FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD: &str =
+	"FromWococoInboundLaneApi_latest_confirmed_nonce";
 /// Name of the `FromWococoInboundLaneApi::unrewarded_relayers_state` runtime method.
-pub const FROM_WOCOCO_UNREWARDED_RELAYERS_STATE: &str = "FromWococoInboundLaneApi_unrewarded_relayers_state";
+pub const FROM_WOCOCO_UNREWARDED_RELAYERS_STATE: &str =
+	"FromWococoInboundLaneApi_unrewarded_relayers_state";
 
 sp_api::decl_runtime_apis! {
 	/// API for querying information about the finalized Wococo headers.
@@ -81,7 +90,7 @@ sp_api::decl_runtime_apis! {
 		///
 		/// Returns `None` if message is too expensive to be sent to Wococo from this chain.
 		///
-		/// Please keep in mind that this method returns lowest message fee required for message
+		/// Please keep in mind that this method returns the lowest message fee required for message
 		/// to be accepted to the lane. It may be good idea to pay a bit over this price to account
 		/// future exchange rate changes and guarantee that relayer would deliver your message
 		/// to the target chain.
@@ -112,7 +121,7 @@ sp_api::decl_runtime_apis! {
 	pub trait FromWococoInboundLaneApi {
 		/// Returns nonce of the latest message, received by given lane.
 		fn latest_received_nonce(lane: LaneId) -> MessageNonce;
-		/// Nonce of latest message that has been confirmed to the bridged chain.
+		/// Nonce of the latest message that has been confirmed to the bridged chain.
 		fn latest_confirmed_nonce(lane: LaneId) -> MessageNonce;
 		/// State of the unrewarded relayers set at given lane.
 		fn unrewarded_relayers_state(lane: LaneId) -> UnrewardedRelayersState;
diff --git a/polkadot/bridges/primitives/currency-exchange/Cargo.toml b/polkadot/bridges/primitives/currency-exchange/Cargo.toml
index 43367ba7992b1751825c775c397a9d2945f20c36..165891f0c6b1d418091f4227b65a368acc946b27 100644
--- a/polkadot/bridges/primitives/currency-exchange/Cargo.toml
+++ b/polkadot/bridges/primitives/currency-exchange/Cargo.toml
@@ -7,19 +7,21 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
 std = [
 	"codec/std",
 	"frame-support/std",
+	"scale-info/std",
 	"sp-api/std",
 	"sp-std/std",
 ]
diff --git a/polkadot/bridges/primitives/currency-exchange/src/lib.rs b/polkadot/bridges/primitives/currency-exchange/src/lib.rs
index 88695dbb5ef406cedd997fe434aa8f5e39befb35..1a30915b1cbfa7ab8305515c36e28cefcb56f936 100644
--- a/polkadot/bridges/primitives/currency-exchange/src/lib.rs
+++ b/polkadot/bridges/primitives/currency-exchange/src/lib.rs
@@ -22,6 +22,7 @@
 
 use codec::{Decode, Encode, EncodeLike};
 use frame_support::{Parameter, RuntimeDebug};
+use scale_info::TypeInfo;
 use sp_api::decl_runtime_apis;
 use sp_std::marker::PhantomData;
 
@@ -36,7 +37,7 @@ pub enum Error {
 	InvalidRecipient,
 	/// Cannot map from peer recipient to this blockchain recipient.
 	FailedToMapRecipients,
-	/// Failed to convert from peer blockchain currency to this blockhain currency.
+	/// Failed to convert from peer blockchain currency to this blockchain currency.
 	FailedToConvertCurrency,
 	/// Deposit has failed.
 	DepositFailed,
@@ -48,7 +49,7 @@ pub enum Error {
 pub type Result<T> = sp_std::result::Result<T, Error>;
 
 /// Peer blockchain lock funds transaction.
-#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)]
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
 pub struct LockFundsTransaction<TransferId, Recipient, Amount> {
 	/// Something that uniquely identifies this transfer.
 	pub id: TransferId,
@@ -63,7 +64,7 @@ pub trait MaybeLockFundsTransaction {
 	/// Transaction type.
 	type Transaction;
 	/// Identifier that uniquely identifies this transfer.
-	type Id: Decode + Encode + EncodeLike + sp_std::fmt::Debug;
+	type Id: Decode + Encode + TypeInfo + EncodeLike + sp_std::fmt::Debug;
 	/// Peer recipient type.
 	type Recipient;
 	/// Peer currency amount type.
@@ -71,7 +72,9 @@ pub trait MaybeLockFundsTransaction {
 
 	/// Parse lock funds transaction of the peer blockchain. Returns None if
 	/// transaction format is unknown, or it isn't a lock funds transaction.
-	fn parse(tx: &Self::Transaction) -> Result<LockFundsTransaction<Self::Id, Self::Recipient, Self::Amount>>;
+	fn parse(
+		tx: &Self::Transaction,
+	) -> Result<LockFundsTransaction<Self::Id, Self::Recipient, Self::Amount>>;
 }
 
 /// Map that maps recipients from peer blockchain to this blockchain recipients.
diff --git a/polkadot/bridges/primitives/ethereum-poa/Cargo.toml b/polkadot/bridges/primitives/ethereum-poa/Cargo.toml
index cd2c3a97a0f32095dd8812246b663235fc8f3099..71f071bbf0e6df2c7b99fd770ee8d6fc5364037e 100644
--- a/polkadot/bridges/primitives/ethereum-poa/Cargo.toml
+++ b/polkadot/bridges/primitives/ethereum-poa/Cargo.toml
@@ -7,27 +7,28 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 ethbloom = { version = "0.10.0", default-features = false, features = ["rlp"] }
 fixed-hash = { version = "0.7", default-features = false }
 hash-db = { version = "0.15.2", default-features = false }
 impl-rlp = { version = "0.3", default-features = false }
 impl-serde = { version = "0.3.1", optional = true }
-libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] }
+libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac", "static-context"] }
 parity-bytes = { version = "0.1", default-features = false }
 plain_hasher = { version = "0.2.2", default-features = false }
-primitive-types = { version = "0.9", default-features = false, features = ["codec", "rlp"] }
+primitive-types = { version = "0.10", default-features = false, features = ["codec", "rlp"] }
 rlp = { version = "0.5", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0", optional = true }
 serde-big-array = { version = "0.2", optional = true }
 triehash = { version = "0.8.2", default-features = false }
 
 # Substrate Dependencies
 
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
 hex-literal = "0.2"
@@ -47,6 +48,7 @@ std = [
 	"primitive-types/std",
 	"primitive-types/serde",
 	"rlp/std",
+	"scale-info/std",
 	"serde/std",
 	"serde-big-array",
 	"sp-api/std",
diff --git a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs b/polkadot/bridges/primitives/ethereum-poa/src/lib.rs
index b912262992d2ad4495ff2a954af7c9321c4bc697..d420e0b1ec4ba41ddd5f93db838ecd4bbfc47a2a 100644
--- a/polkadot/bridges/primitives/ethereum-poa/src/lib.rs
+++ b/polkadot/bridges/primitives/ethereum-poa/src/lib.rs
@@ -28,6 +28,7 @@ use codec::{Decode, Encode};
 use ethbloom::{Bloom as EthBloom, Input as BloomInput};
 use fixed_hash::construct_fixed_hash;
 use rlp::{Decodable, DecoderError, Rlp, RlpStream};
+use scale_info::TypeInfo;
 use sp_io::hashing::keccak_256;
 use sp_runtime::RuntimeDebug;
 use sp_std::prelude::*;
@@ -57,7 +58,7 @@ pub type Address = H160;
 pub mod signatures;
 
 /// Complete header id.
-#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy)]
+#[derive(Encode, Decode, Default, RuntimeDebug, PartialEq, Clone, Copy, TypeInfo)]
 pub struct HeaderId {
 	/// Header number.
 	pub number: u64,
@@ -66,7 +67,7 @@ pub struct HeaderId {
 }
 
 /// An Aura header.
-#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Clone, Default, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 pub struct AuraHeader {
 	/// Parent block hash.
@@ -129,7 +130,7 @@ pub struct UnsignedTransaction {
 }
 
 /// Information describing execution of a transaction.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct Receipt {
 	/// The total gas used in the block following execution of the transaction.
 	pub gas_used: U256,
@@ -142,7 +143,7 @@ pub struct Receipt {
 }
 
 /// Transaction outcome store in the receipt.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 pub enum TransactionOutcome {
 	/// Status and state root are unknown under EIP-98 rules.
 	Unknown,
@@ -153,7 +154,7 @@ pub enum TransactionOutcome {
 }
 
 /// A record of execution for a `LOG` operation.
-#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug)]
+#[derive(Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
 pub struct LogEntry {
 	/// The address of the contract executing at the point of the `LOG` operation.
 	pub address: Address,
@@ -164,7 +165,7 @@ pub struct LogEntry {
 }
 
 /// Logs bloom.
-#[derive(Clone, Encode, Decode)]
+#[derive(Clone, Encode, Decode, TypeInfo)]
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 pub struct Bloom(#[cfg_attr(feature = "std", serde(with = "BigArray"))] [u8; 256]);
 
@@ -185,10 +186,7 @@ pub struct SealedEmptyStep {
 impl AuraHeader {
 	/// Compute id of this header.
 	pub fn compute_id(&self) -> HeaderId {
-		HeaderId {
-			number: self.number,
-			hash: self.compute_hash(),
-		}
+		HeaderId { number: self.number, hash: self.compute_hash() }
 	}
 
 	/// Compute hash of this header (keccak of the RLP with seal).
@@ -198,10 +196,9 @@ impl AuraHeader {
 
 	/// Get id of this header' parent. Returns None if this is genesis header.
 	pub fn parent_id(&self) -> Option<HeaderId> {
-		self.number.checked_sub(1).map(|parent_number| HeaderId {
-			number: parent_number,
-			hash: self.parent_hash,
-		})
+		self.number
+			.checked_sub(1)
+			.map(|parent_number| HeaderId { number: parent_number, hash: self.parent_hash })
 	}
 
 	/// Check if passed transactions receipts are matching receipts root in this header.
@@ -238,7 +235,7 @@ impl AuraHeader {
 				let mut message = self.compute_hash().as_bytes().to_vec();
 				message.extend_from_slice(self.seal.get(2)?);
 				keccak_256(&message).into()
-			}
+			},
 			false => keccak_256(&self.rlp(false)).into(),
 		})
 	}
@@ -255,9 +252,7 @@ impl AuraHeader {
 
 	/// Extracts the empty steps from the header seal.
 	pub fn empty_steps(&self) -> Option<Vec<SealedEmptyStep>> {
-		self.seal
-			.get(2)
-			.and_then(|x| Rlp::new(x).as_list::<SealedEmptyStep>().ok())
+		self.seal.get(2).and_then(|x| Rlp::new(x).as_list::<SealedEmptyStep>().ok())
 	}
 
 	/// Returns header RLP with or without seals.
@@ -323,7 +318,7 @@ impl UnsignedTransaction {
 		stream.out().to_vec()
 	}
 
-	/// Encode to given rlp stream.
+	/// Encode to given RLP stream.
 	pub fn rlp_to(&self, chain_id: Option<u64>, stream: &mut RlpStream) {
 		stream.append(&self.nonce);
 		stream.append(&self.gas_price);
@@ -368,15 +363,15 @@ impl Receipt {
 		match self.outcome {
 			TransactionOutcome::Unknown => {
 				s.begin_list(3);
-			}
+			},
 			TransactionOutcome::StateRoot(ref root) => {
 				s.begin_list(4);
 				s.append(root);
-			}
+			},
 			TransactionOutcome::StatusCode(ref status_code) => {
 				s.begin_list(4);
 				s.append(status_code);
-			}
+			},
 		}
 		s.append(&self.gas_used);
 		s.append(&EthBloom::from(self.log_bloom.0));
@@ -405,7 +400,7 @@ impl SealedEmptyStep {
 		keccak_256(&message.out()).into()
 	}
 
-	/// Returns rlp for the vector of empty steps (we only do encoding in tests).
+	/// Returns RLP for the vector of empty steps (we only do encoding in tests).
 	pub fn rlp_of(empty_steps: &[SealedEmptyStep]) -> Bytes {
 		let mut s = RlpStream::new();
 		s.begin_list(empty_steps.len());
@@ -428,13 +423,13 @@ impl Decodable for SealedEmptyStep {
 impl LogEntry {
 	/// Calculates the bloom of this log entry.
 	pub fn bloom(&self) -> Bloom {
-		let eth_bloom =
-			self.topics
-				.iter()
-				.fold(EthBloom::from(BloomInput::Raw(self.address.as_bytes())), |mut b, t| {
-					b.accrue(BloomInput::Raw(t.as_bytes()));
-					b
-				});
+		let eth_bloom = self.topics.iter().fold(
+			EthBloom::from(BloomInput::Raw(self.address.as_bytes())),
+			|mut b, t| {
+				b.accrue(BloomInput::Raw(t.as_bytes()));
+				b
+			},
+		);
 		Bloom(*eth_bloom.data())
 	}
 }
@@ -458,6 +453,8 @@ impl PartialEq<Bloom> for Bloom {
 	}
 }
 
+// there's no default for [_; 256], but clippy still complains
+#[allow(clippy::derivable_impls)]
 impl Default for Bloom {
 	fn default() -> Self {
 		Bloom([0; 256])
@@ -496,14 +493,12 @@ pub fn transaction_decode_rlp(raw_tx: &[u8]) -> Result<Transaction, DecoderError
 	let message = unsigned.message(chain_id);
 
 	// recover tx sender
-	let sender_public = sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes())
-		.map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?;
+	let sender_public =
+		sp_io::crypto::secp256k1_ecdsa_recover(&signature, message.as_fixed_bytes())
+			.map_err(|_| rlp::DecoderError::Custom("Failed to recover transaction sender"))?;
 	let sender_address = public_to_address(&sender_public);
 
-	Ok(Transaction {
-		sender: sender_address,
-		unsigned,
-	})
+	Ok(Transaction { sender: sender_address, unsigned })
 }
 
 /// Convert public key into corresponding ethereum address.
@@ -517,7 +512,10 @@ pub fn public_to_address(public: &[u8; 64]) -> Address {
 /// Check ethereum merkle proof.
 /// Returns Ok(computed-root) if check succeeds.
 /// Returns Err(computed-root) if check fails.
-fn check_merkle_proof<T: AsRef<[u8]>>(expected_root: H256, items: impl Iterator<Item = T>) -> Result<H256, H256> {
+fn check_merkle_proof<T: AsRef<[u8]>>(
+	expected_root: H256,
+	items: impl Iterator<Item = T>,
+) -> Result<H256, H256> {
 	let computed_root = compute_merkle_root(items);
 	if computed_root == expected_root {
 		Ok(computed_root)
diff --git a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs b/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs
index a4e076f2200c6217b6148a212d16d4820465f171..26371f2166ad542aa4f1d64e76c86c4490882baf 100644
--- a/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs
+++ b/polkadot/bridges/primitives/ethereum-poa/src/signatures.rs
@@ -20,14 +20,14 @@
 //! Used for testing and benchmarking.
 
 // reexport to avoid direct secp256k1 deps by other crates
-pub use secp256k1::SecretKey;
+pub use libsecp256k1::SecretKey;
 
 use crate::{
-	public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction, UnsignedTransaction, H256,
-	H520, U256,
+	public_to_address, rlp_encode, step_validator, Address, AuraHeader, RawTransaction,
+	UnsignedTransaction, H256, H520, U256,
 };
 
-use secp256k1::{Message, PublicKey};
+use libsecp256k1::{Message, PublicKey};
 
 /// Utilities for signing headers.
 pub trait SignHeader {
@@ -80,7 +80,8 @@ impl SignTransaction for UnsignedTransaction {
 
 /// Return author's signature over given message.
 pub fn sign(author: &SecretKey, message: H256) -> H520 {
-	let (signature, recovery_id) = secp256k1::sign(&Message::parse(message.as_fixed_bytes()), author);
+	let (signature, recovery_id) =
+		libsecp256k1::sign(&Message::parse(message.as_fixed_bytes()), author);
 	let mut raw_signature = [0u8; 65];
 	raw_signature[..64].copy_from_slice(&signature.serialize());
 	raw_signature[64] = recovery_id.serialize();
@@ -116,10 +117,7 @@ mod tests {
 		let raw_tx = unsigned.clone().sign_by(&signer, Some(42));
 		assert_eq!(
 			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction {
-				sender: signer_address,
-				unsigned,
-			}),
+			Ok(Transaction { sender: signer_address, unsigned }),
 		);
 
 		// case2: without chain_id replay protection + contract creation
@@ -134,10 +132,7 @@ mod tests {
 		let raw_tx = unsigned.clone().sign_by(&signer, None);
 		assert_eq!(
 			transaction_decode_rlp(&raw_tx),
-			Ok(Transaction {
-				sender: signer_address,
-				unsigned,
-			}),
+			Ok(Transaction { sender: signer_address, unsigned }),
 		);
 	}
 }
diff --git a/polkadot/bridges/primitives/header-chain/Cargo.toml b/polkadot/bridges/primitives/header-chain/Cargo.toml
index e64a54a1ad20fed03dfb402243509499052851fe..76b710247f746c0358791a9e7f714dd18eec7bb9 100644
--- a/polkadot/bridges/primitives/header-chain/Cargo.toml
+++ b/polkadot/bridges/primitives/header-chain/Cargo.toml
@@ -7,18 +7,18 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
-finality-grandpa = { version = "0.14.4", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
+finality-grandpa = { version = "0.14.0", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 serde = { version = "1.0", optional = true }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [dev-dependencies]
 assert_matches = "1.5"
@@ -32,6 +32,7 @@ std = [
 	"scale-info/std",
 	"serde/std",
 	"frame-support/std",
+	"scale-info/std",
 	"sp-core/std",
 	"sp-finality-grandpa/std",
 	"sp-runtime/std",
diff --git a/polkadot/bridges/primitives/header-chain/src/justification.rs b/polkadot/bridges/primitives/header-chain/src/justification.rs
index fc72564810685a942c3191e0d93679ed14cfa9ad..5f3b72517894842b37d6a8d6117543f05e002111 100644
--- a/polkadot/bridges/primitives/header-chain/src/justification.rs
+++ b/polkadot/bridges/primitives/header-chain/src/justification.rs
@@ -25,8 +25,10 @@ use frame_support::RuntimeDebug;
 use scale_info::TypeInfo;
 use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId};
 use sp_runtime::traits::Header as HeaderT;
-use sp_std::collections::{btree_map::BTreeMap, btree_set::BTreeSet};
-use sp_std::prelude::*;
+use sp_std::{
+	collections::{btree_map::BTreeMap, btree_set::BTreeSet},
+	prelude::*,
+};
 
 /// A GRANDPA Justification is a proof that a given header was finalized
 /// at a certain height and with a certain set of authorities.
@@ -38,7 +40,8 @@ pub struct GrandpaJustification<Header: HeaderT> {
 	/// The round (voting period) this justification is valid for.
 	pub round: u64,
 	/// The set of votes for the chain which is to be finalized.
-	pub commit: finality_grandpa::Commit<Header::Hash, Header::Number, AuthoritySignature, AuthorityId>,
+	pub commit:
+		finality_grandpa::Commit<Header::Hash, Header::Number, AuthoritySignature, AuthorityId>,
 	/// A proof that the chain of blocks in the commit are related to each other.
 	pub votes_ancestries: Vec<Header>,
 }
@@ -58,7 +61,8 @@ pub enum Error {
 	InvalidJustificationTarget,
 	/// The authority has provided an invalid signature.
 	InvalidAuthoritySignature,
-	/// The justification contains pre-commit for header that is not a descendant of the commit header.
+	/// The justification contains precommit for header that is not a descendant of the commit
+	/// header.
 	PrecommitIsNotCommitDescendant,
 	/// The cumulative weight of all votes in the justification is not enough to justify commit
 	/// header finalization.
@@ -88,7 +92,7 @@ where
 {
 	// ensure that it is justification for the expected header
 	if (justification.commit.target_hash, justification.commit.target_number) != finalized_target {
-		return Err(Error::InvalidJustificationTarget);
+		return Err(Error::InvalidJustificationTarget)
 	}
 
 	let mut chain = AncestryChain::new(&justification.votes_ancestries);
@@ -100,30 +104,32 @@ where
 		let authority_info = match authorities_set.get(&signed.id) {
 			Some(authority_info) => authority_info,
 			None => {
-				// just ignore precommit from unknown authority as `finality_grandpa::import_precommit` does
-				continue;
-			}
+				// just ignore precommit from unknown authority as
+				// `finality_grandpa::import_precommit` does
+				continue
+			},
 		};
 
 		// check if authority has already voted in the same round.
 		//
 		// there's a lot of code in `validate_commit` and `import_precommit` functions inside
-		// `finality-grandpa` crate (mostly related to reporing equivocations). But the only thing that we
-		// care about is that only first vote from the authority is accepted
+		// `finality-grandpa` crate (mostly related to reporing equivocations). But the only thing
+		// that we care about is that only first vote from the authority is accepted
 		if !votes.insert(signed.id.clone()) {
-			continue;
+			continue
 		}
 
 		// everything below this line can't just `continue`, because state is already altered
 
 		// all precommits must be for block higher than the target
 		if signed.precommit.target_number < justification.commit.target_number {
-			return Err(Error::PrecommitIsNotCommitDescendant);
+			return Err(Error::PrecommitIsNotCommitDescendant)
 		}
-		// all precommits must be for target block descendants
-		chain = chain.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?;
-		// since we know now that the precommit target is the descendant of the justification target,
-		// we may increase 'weight' of the justification target
+		// all precommits must be for target block descendents
+		chain = chain
+			.ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?;
+		// since we know now that the precommit target is the descendant of the justification
+		// target, we may increase 'weight' of the justification target
 		//
 		// there's a lot of code in the `VoteGraph::insert` method inside `finality-grandpa` crate,
 		// but in the end it is only used to find GHOST, which we don't care about. The only thing
@@ -145,13 +151,13 @@ where
 			authorities_set_id,
 			&mut signature_buffer,
 		) {
-			return Err(Error::InvalidAuthoritySignature);
+			return Err(Error::InvalidAuthoritySignature)
 		}
 	}
 
 	// check that there are no extra headers in the justification
 	if !chain.unvisited.is_empty() {
-		return Err(Error::ExtraHeadersInVotesAncestries);
+		return Err(Error::ExtraHeadersInVotesAncestries)
 	}
 
 	// check that the cumulative weight of validators voted for the justification target (or one
@@ -169,7 +175,7 @@ where
 pub struct AncestryChain<Header: HeaderT> {
 	/// Header hash => parent header hash mapping.
 	pub parents: BTreeMap<Header::Hash, Header::Hash>,
-	/// Hashes of headers that weren't visited by `is_ancestor` method.
+	/// Hashes of headers that were not visited by `is_ancestor` method.
 	pub unvisited: BTreeSet<Header::Hash>,
 }
 
@@ -187,7 +193,8 @@ impl<Header: HeaderT> AncestryChain<Header> {
 		AncestryChain { parents, unvisited }
 	}
 
-	/// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and `Ok(_)` otherwise.
+	/// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and
+	/// `Ok(_)` otherwise.
 	pub fn ensure_descendant(
 		mut self,
 		commit_target: &Header::Hash,
@@ -196,22 +203,22 @@ impl<Header: HeaderT> AncestryChain<Header> {
 		let mut current_hash = *precommit_target;
 		loop {
 			if current_hash == *commit_target {
-				break;
+				break
 			}
 
 			let is_visited_before = !self.unvisited.remove(&current_hash);
 			current_hash = match self.parents.get(&current_hash) {
 				Some(parent_hash) => {
 					if is_visited_before {
-						// `Some(parent_hash)` means that the `current_hash` is in the `parents` container
-						// `is_visited_before` means that it has been visited before in some of previous calls
-						// => since we assume that previous call has finished with `true`, this also will
-						//    be finished with `true`
-						return Ok(self);
+						// `Some(parent_hash)` means that the `current_hash` is in the `parents`
+						// container `is_visited_before` means that it has been visited before in
+						// some of previous calls => since we assume that previous call has finished
+						// with `true`, this also will    be finished with `true`
+						return Ok(self)
 					}
 
 					*parent_hash
-				}
+				},
 				None => return Err(Error::PrecommitIsNotCommitDescendant),
 			};
 		}
diff --git a/polkadot/bridges/primitives/header-chain/src/lib.rs b/polkadot/bridges/primitives/header-chain/src/lib.rs
index 16511e99f79e706dee6e0561d2f3f852aba0b406..5feb30aec3eeeb136b9558bf0efe8e5cf4e8cf85 100644
--- a/polkadot/bridges/primitives/header-chain/src/lib.rs
+++ b/polkadot/bridges/primitives/header-chain/src/lib.rs
@@ -20,24 +20,21 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use codec::{Codec, Decode, Encode, EncodeLike};
-use core::clone::Clone;
-use core::cmp::Eq;
-use core::default::Default;
-use core::fmt::Debug;
+use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug};
 use scale_info::TypeInfo;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
 use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID};
-use sp_runtime::RuntimeDebug;
-use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT};
+use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT, RuntimeDebug};
+use sp_std::boxed::Box;
 
 pub mod justification;
 
 /// A type that can be used as a parameter in a dispatchable function.
 ///
 /// When using `decl_module` all arguments for call functions must implement this trait.
-pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug {}
-impl<T> Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug {}
+pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {}
+impl<T> Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {}
 
 /// A GRANDPA Authority List and ID.
 #[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo)]
@@ -63,7 +60,7 @@ impl AuthoritySet {
 #[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
 pub struct InitializationData<H: HeaderT> {
 	/// The header from which we should start syncing.
-	pub header: H,
+	pub header: Box<H>,
 	/// The initial authorities of the pallet.
 	pub authority_list: AuthorityList,
 	/// The ID of the initial authority set.
@@ -82,7 +79,9 @@ pub trait InclusionProofVerifier {
 	/// Verify that transaction is a part of given block.
 	///
 	/// Returns Some(transaction) if proof is valid and None otherwise.
-	fn verify_transaction_inclusion_proof(proof: &Self::TransactionInclusionProof) -> Option<Self::Transaction>;
+	fn verify_transaction_inclusion_proof(
+		proof: &Self::TransactionInclusionProof,
+	) -> Option<Self::Transaction>;
 }
 
 /// A trait for pallets which want to keep track of finalized headers from a bridged chain.
diff --git a/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs b/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs
index 0b55c19035287bd72932d66c8c8b6e5b6ba416cd..51275bbd645e50d45759df82672b1002eea161a2 100644
--- a/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs
+++ b/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs
@@ -23,8 +23,8 @@
 use assert_matches::assert_matches;
 use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification};
 use bp_test_utils::{
-	header_id, make_justification_for_header, signed_precommit, test_header, Account, JustificationGeneratorParams,
-	ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID,
+	header_id, make_justification_for_header, signed_precommit, test_header, Account,
+	JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID,
 };
 use finality_grandpa::voter_set::VoterSet;
 use sp_finality_grandpa::{AuthorityId, AuthorityWeight};
@@ -44,18 +44,22 @@ impl AncestryChain {
 }
 
 impl finality_grandpa::Chain<TestHash, TestNumber> for AncestryChain {
-	fn ancestry(&self, base: TestHash, block: TestHash) -> Result<Vec<TestHash>, finality_grandpa::Error> {
+	fn ancestry(
+		&self,
+		base: TestHash,
+		block: TestHash,
+	) -> Result<Vec<TestHash>, finality_grandpa::Error> {
 		let mut route = Vec::new();
 		let mut current_hash = block;
 		loop {
 			if current_hash == base {
-				break;
+				break
 			}
 			match self.0.parents.get(&current_hash).cloned() {
 				Some(parent_hash) => {
 					current_hash = parent_hash;
 					route.push(current_hash);
-				}
+				},
 				_ => return Err(finality_grandpa::Error::NotDescendent),
 			}
 		}
@@ -81,14 +85,11 @@ fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> {
 	vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]
 }
 
-/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a header finality.
+/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a
+/// header finality.
 pub fn minimal_voter_set() -> VoterSet<AuthorityId> {
-	VoterSet::new(
-		minimal_accounts_set()
-			.iter()
-			.map(|(id, w)| (AuthorityId::from(*id), *w)),
-	)
-	.unwrap()
+	VoterSet::new(minimal_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w)))
+		.unwrap()
 }
 
 /// Make a valid GRANDPA justification with sensible defaults.
@@ -174,14 +175,8 @@ fn same_result_when_justification_contains_duplicate_vote() {
 	let mut justification = make_default_justification(&test_header(1));
 	// the justification may contain exactly the same vote (i.e. same precommit and same signature)
 	// multiple times && it isn't treated as an error by original implementation
-	justification
-		.commit
-		.precommits
-		.push(justification.commit.precommits[0].clone());
-	justification
-		.commit
-		.precommits
-		.push(justification.commit.precommits[0].clone());
+	justification.commit.precommits.push(justification.commit.precommits[0].clone());
+	justification.commit.precommits.push(justification.commit.precommits[0].clone());
 
 	// our implementation succeeds
 	assert_eq!(
diff --git a/polkadot/bridges/primitives/message-dispatch/Cargo.toml b/polkadot/bridges/primitives/message-dispatch/Cargo.toml
index 9b24ae86a067858d8b748ba216116f438255edc9..9897b3199781db61b2e3dd98698b272cd92a4f8e 100644
--- a/polkadot/bridges/primitives/message-dispatch/Cargo.toml
+++ b/polkadot/bridges/primitives/message-dispatch/Cargo.toml
@@ -8,13 +8,13 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
 bp-runtime = { path = "../runtime", default-features = false }
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/polkadot/bridges/primitives/message-dispatch/src/lib.rs b/polkadot/bridges/primitives/message-dispatch/src/lib.rs
index b2683674b776946920b2e28da87cf014746b0dc2..07e448ee7ae62604399c693e9195c7b43b796d09 100644
--- a/polkadot/bridges/primitives/message-dispatch/src/lib.rs
+++ b/polkadot/bridges/primitives/message-dispatch/src/lib.rs
@@ -35,7 +35,7 @@ pub type Weight = u64;
 pub type SpecVersion = u32;
 
 /// A generic trait to dispatch arbitrary messages delivered over the bridge.
-pub trait MessageDispatch<AccountId, MessageId> {
+pub trait MessageDispatch<AccountId, BridgeMessageId> {
 	/// A type of the message to be dispatched.
 	type Message: codec::Decode;
 
@@ -61,7 +61,7 @@ pub trait MessageDispatch<AccountId, MessageId> {
 	fn dispatch<P: FnOnce(&AccountId, Weight) -> Result<(), ()>>(
 		source_chain: ChainId,
 		target_chain: ChainId,
-		id: MessageId,
+		id: BridgeMessageId,
 		message: Result<Self::Message, ()>,
 		pay_dispatch_fee: P,
 	) -> MessageDispatchResult;
@@ -78,7 +78,7 @@ pub enum CallOrigin<SourceChainAccountId, TargetChainAccountPublic, TargetChainS
 	/// from a derived account.
 	///
 	/// The derived account represents the source Root account on the target chain. This is useful
-	/// if the target chain needs some way of knowing that a call came from a priviledged origin on
+	/// if the target chain needs some way of knowing that a call came from a privileged origin on
 	/// the source chain (maybe to allow a configuration change for example).
 	SourceRoot,
 
@@ -113,7 +113,12 @@ pub enum CallOrigin<SourceChainAccountId, TargetChainAccountPublic, TargetChainS
 
 /// Message payload type used by dispatch module.
 #[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)]
-pub struct MessagePayload<SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature, Call> {
+pub struct MessagePayload<
+	SourceChainAccountId,
+	TargetChainAccountPublic,
+	TargetChainSignature,
+	Call,
+> {
 	/// Runtime specification version. We only dispatch messages that have the same
 	/// runtime version. Otherwise we risk to misinterpret encoded calls.
 	pub spec_version: SpecVersion,
diff --git a/polkadot/bridges/primitives/messages/Cargo.toml b/polkadot/bridges/primitives/messages/Cargo.toml
index 191742005f443efc78a330d08774d41252dabd70..31ec46222cd890aa2a3e13a3724937c3e2e1cfc9 100644
--- a/polkadot/bridges/primitives/messages/Cargo.toml
+++ b/polkadot/bridges/primitives/messages/Cargo.toml
@@ -8,10 +8,10 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
 bitvec = { version = "0.20", default-features = false, features = ["alloc"] }
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive", "bit-vec"] }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false, features = ["derive", "bit-vec"] }
 impl-trait-for-tuples = "0.2"
-scale-info = { version = "1.0", default-features = false, features = ["derive"] }
-serde = { version = "1.0.101", optional = true, features = ["derive"] }
+scale-info = { version = "1.0", default-features = false, features = ["bit-vec", "derive"] }
+serde = { version = "1.0", optional = true, features = ["derive"] }
 
 # Bridge dependencies
 
@@ -19,9 +19,9 @@ bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/polkadot/bridges/primitives/messages/src/lib.rs b/polkadot/bridges/primitives/messages/src/lib.rs
index 344735b53c91647640a619610f7ffb23f95af293..abefe8d789b8754d91edfac6aafdeb4c07d0c49c 100644
--- a/polkadot/bridges/primitives/messages/src/lib.rs
+++ b/polkadot/bridges/primitives/messages/src/lib.rs
@@ -76,7 +76,7 @@ pub type LaneId = [u8; 4];
 pub type MessageNonce = u64;
 
 /// Message id as a tuple.
-pub type MessageId = (LaneId, MessageNonce);
+pub type BridgeMessageId = (LaneId, MessageNonce);
 
 /// Opaque message payload. We only decode this payload when it is dispatched.
 pub type MessagePayload = Vec<u8>;
@@ -111,22 +111,23 @@ pub struct Message<Fee> {
 /// Inbound lane data.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
 pub struct InboundLaneData<RelayerId> {
-	/// Identifiers of relayers and messages that they have delivered to this lane (ordered by message nonce).
+	/// Identifiers of relayers and messages that they have delivered to this lane (ordered by
+	/// message nonce).
 	///
 	/// This serves as a helper storage item, to allow the source chain to easily pay rewards
-	/// to the relayers who succesfuly delivered messages to the target chain (inbound lane).
+	/// to the relayers who successfully delivered messages to the target chain (inbound lane).
 	///
 	/// It is guaranteed to have at most N entries, where N is configured at the module level.
 	/// If there are N entries in this vec, then:
-	/// 1) all incoming messages are rejected if they're missing corresponding `proof-of(outbound-lane.state)`;
-	/// 2) all incoming messages are rejected if `proof-of(outbound-lane.state).last_delivered_nonce` is
-	///    equal to `self.last_confirmed_nonce`.
-	/// Given what is said above, all nonces in this queue are in range:
-	/// `(self.last_confirmed_nonce; self.last_delivered_nonce()]`.
+	/// 1) all incoming messages are rejected if they're missing corresponding
+	/// `proof-of(outbound-lane.state)`; 2) all incoming messages are rejected if
+	/// `proof-of(outbound-lane.state).last_delivered_nonce` is    equal to
+	/// `self.last_confirmed_nonce`. Given what is said above, all nonces in this queue are in
+	/// range: `(self.last_confirmed_nonce; self.last_delivered_nonce()]`.
 	///
 	/// When a relayer sends a single message, both of MessageNonces are the same.
-	/// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the highest nonce.
-	/// Multiple dispatches from the same relayer are allowed.
+	/// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the
+	/// highest nonce. Multiple dispatches from the same relayer are allowed.
 	pub relayers: VecDeque<UnrewardedRelayer<RelayerId>>,
 
 	/// Nonce of the last message that
@@ -142,24 +143,26 @@ pub struct InboundLaneData<RelayerId> {
 
 impl<RelayerId> Default for InboundLaneData<RelayerId> {
 	fn default() -> Self {
-		InboundLaneData {
-			relayers: VecDeque::new(),
-			last_confirmed_nonce: 0,
-		}
+		InboundLaneData { relayers: VecDeque::new(), last_confirmed_nonce: 0 }
 	}
 }
 
 impl<RelayerId> InboundLaneData<RelayerId> {
-	/// Returns approximate size of the struct, given number of entries in the `relayers` set and
+	/// Returns approximate size of the struct, given a number of entries in the `relayers` set and
 	/// size of each entry.
 	///
 	/// Returns `None` if size overflows `u32` limits.
-	pub fn encoded_size_hint(relayer_id_encoded_size: u32, relayers_entries: u32, messages_count: u32) -> Option<u32> {
+	pub fn encoded_size_hint(
+		relayer_id_encoded_size: u32,
+		relayers_entries: u32,
+		messages_count: u32,
+	) -> Option<u32> {
 		let message_nonce_size = 8;
 		let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?;
 		let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?;
 		let dispatch_results_per_byte = 8;
-		let dispatch_result_size = sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte);
+		let dispatch_result_size =
+			sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte);
 		relayers_size
 			.checked_add(message_nonce_size)
 			.and_then(|result| result.checked_add(dispatch_result_size))
@@ -194,8 +197,8 @@ pub type DispatchResultsBitVec = BitVec<Msb0, u8>;
 
 /// Unrewarded relayer entry stored in the inbound lane data.
 ///
-/// This struct represents a continuous range of messages that have been delivered by the same relayer
-/// and whose confirmations are still pending.
+/// This struct represents a continuous range of messages that have been delivered by the same
+/// relayer and whose confirmations are still pending.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
 pub struct UnrewardedRelayer<RelayerId> {
 	/// Identifier of the relayer.
@@ -218,7 +221,8 @@ pub struct DeliveredMessages {
 }
 
 impl DeliveredMessages {
-	/// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given dispatch result.
+	/// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given
+	/// dispatch result.
 	pub fn new(nonce: MessageNonce, dispatch_result: bool) -> Self {
 		DeliveredMessages {
 			begin: nonce,
@@ -227,6 +231,15 @@ impl DeliveredMessages {
 		}
 	}
 
+	/// Return total count of delivered messages.
+	pub fn total_messages(&self) -> MessageNonce {
+		if self.end >= self.begin {
+			self.end - self.begin + 1
+		} else {
+			0
+		}
+	}
+
 	/// Note new dispatched message.
 	pub fn note_dispatched_message(&mut self, dispatch_result: bool) {
 		self.end += 1;
@@ -269,19 +282,20 @@ pub struct UnrewardedRelayersState {
 /// Outbound lane data.
 #[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
 pub struct OutboundLaneData {
-	/// Nonce of oldest message that we haven't yet pruned. May point to not-yet-generated message if
-	/// all sent messages are already pruned.
+	/// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated
+	/// message if all sent messages are already pruned.
 	pub oldest_unpruned_nonce: MessageNonce,
-	/// Nonce of latest message, received by bridged chain.
+	/// Nonce of the latest message, received by bridged chain.
 	pub latest_received_nonce: MessageNonce,
-	/// Nonce of latest message, generated by us.
+	/// Nonce of the latest message, generated by us.
 	pub latest_generated_nonce: MessageNonce,
 }
 
 impl Default for OutboundLaneData {
 	fn default() -> Self {
 		OutboundLaneData {
-			// it is 1 because we're pruning everything in [oldest_unpruned_nonce; latest_received_nonce]
+			// it is 1 because we're pruning everything in [oldest_unpruned_nonce;
+			// latest_received_nonce]
 			oldest_unpruned_nonce: 1,
 			latest_received_nonce: 0,
 			latest_generated_nonce: 0,
@@ -292,7 +306,9 @@ impl Default for OutboundLaneData {
 /// Returns total number of messages in the `InboundLaneData::relayers` vector.
 ///
 /// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`).
-pub fn total_unrewarded_messages<RelayerId>(relayers: &VecDeque<UnrewardedRelayer<RelayerId>>) -> Option<MessageNonce> {
+pub fn total_unrewarded_messages<RelayerId>(
+	relayers: &VecDeque<UnrewardedRelayer<RelayerId>>,
+) -> Option<MessageNonce> {
 	match (relayers.front(), relayers.back()) {
 		(Some(front), Some(back)) => {
 			if let Some(difference) = back.messages.end.checked_sub(front.messages.begin) {
@@ -300,7 +316,7 @@ pub fn total_unrewarded_messages<RelayerId>(relayers: &VecDeque<UnrewardedRelaye
 			} else {
 				Some(0)
 			}
-		}
+		},
 		_ => Some(0),
 	}
 }
@@ -314,10 +330,7 @@ mod tests {
 		assert_eq!(
 			total_unrewarded_messages(
 				&vec![
-					UnrewardedRelayer {
-						relayer: 1,
-						messages: DeliveredMessages::new(0, true)
-					},
+					UnrewardedRelayer { relayer: 1, messages: DeliveredMessages::new(0, true) },
 					UnrewardedRelayer {
 						relayer: 2,
 						messages: DeliveredMessages::new(MessageNonce::MAX, true)
@@ -341,7 +354,11 @@ mod tests {
 			(13u8, 128u8),
 		];
 		for (relayer_entries, messages_count) in test_cases {
-			let expected_size = InboundLaneData::<u8>::encoded_size_hint(1, relayer_entries as _, messages_count as _);
+			let expected_size = InboundLaneData::<u8>::encoded_size_hint(
+				1,
+				relayer_entries as _,
+				messages_count as _,
+			);
 			let actual_size = InboundLaneData {
 				relayers: (1u8..=relayer_entries)
 					.map(|i| {
@@ -375,11 +392,8 @@ mod tests {
 
 	#[test]
 	fn message_dispatch_result_works() {
-		let delivered_messages = DeliveredMessages {
-			begin: 100,
-			end: 150,
-			dispatch_results: bitvec![Msb0, u8; 1; 151],
-		};
+		let delivered_messages =
+			DeliveredMessages { begin: 100, end: 150, dispatch_results: bitvec![Msb0, u8; 1; 151] };
 
 		assert!(!delivered_messages.contains_message(99));
 		assert!(delivered_messages.contains_message(100));
diff --git a/polkadot/bridges/primitives/messages/src/source_chain.rs b/polkadot/bridges/primitives/messages/src/source_chain.rs
index 392331eda6722c136a4b1423f64e334c8d5227fa..1ff05abf131eae97417354300640744c05a000c8 100644
--- a/polkadot/bridges/primitives/messages/src/source_chain.rs
+++ b/polkadot/bridges/primitives/messages/src/source_chain.rs
@@ -18,9 +18,14 @@
 
 use crate::{DeliveredMessages, InboundLaneData, LaneId, MessageNonce, OutboundLaneData};
 
+use crate::UnrewardedRelayer;
 use bp_runtime::Size;
-use frame_support::{Parameter, RuntimeDebug};
-use sp_std::{collections::btree_map::BTreeMap, fmt::Debug};
+use frame_support::{weights::Weight, Parameter, RuntimeDebug};
+use sp_std::{
+	collections::{btree_map::BTreeMap, vec_deque::VecDeque},
+	fmt::Debug,
+	ops::RangeInclusive,
+};
 
 /// The sender of the message on the source chain.
 pub type Sender<AccountId> = frame_system::RawOrigin<AccountId>;
@@ -56,14 +61,14 @@ pub trait TargetHeaderChain<Payload, AccountId> {
 	///
 	/// The proper implementation must ensure that the delivery-transaction with this
 	/// payload would (at least) be accepted into target chain transaction pool AND
-	/// eventually will be successfully 'mined'. The most obvious incorrect implementation
+	/// eventually will be successfully mined. The most obvious incorrect implementation
 	/// example would be implementation for BTC chain that accepts payloads larger than
 	/// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer
 	/// will be unable to craft valid transaction => this (and all subsequent) messages will
 	/// never be delivered.
 	fn verify_message(payload: &Payload) -> Result<(), Self::Error>;
 
-	/// Verify messages delivery proof and return lane && nonce of the latest recevied message.
+	/// Verify messages delivery proof and return lane && nonce of the latest received message.
 	fn verify_messages_delivery_proof(
 		proof: Self::MessagesDeliveryProof,
 	) -> Result<(LaneId, InboundLaneData<AccountId>), Self::Error>;
@@ -81,7 +86,8 @@ pub trait LaneMessageVerifier<Submitter, Payload, Fee> {
 	/// Error type.
 	type Error: Debug + Into<&'static str>;
 
-	/// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the lane.
+	/// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the
+	/// lane.
 	fn verify_message(
 		submitter: &Sender<Submitter>,
 		delivery_and_dispatch_fee: &Fee,
@@ -95,14 +101,14 @@ pub trait LaneMessageVerifier<Submitter, Payload, Fee> {
 /// submitter is paying (in source chain tokens/assets) for:
 ///
 /// 1) submit-message-transaction-fee itself. This fee is not included in the
-/// `delivery_and_dispatch_fee` and is witheld by the regular transaction payment mechanism;
+/// `delivery_and_dispatch_fee` and is withheld by the regular transaction payment mechanism;
 /// 2) message-delivery-transaction-fee. It is submitted to the target node by relayer;
 /// 3) message-dispatch fee. It is paid by relayer for processing message by target chain;
 /// 4) message-receiving-delivery-transaction-fee. It is submitted to the source node
 /// by relayer.
 ///
 /// So to be sure that any non-altruist relayer would agree to deliver message, submitter
-/// should set `delivery_and_dispatch_fee` to at least (equialent of): sum of fees from (2)
+/// should set `delivery_and_dispatch_fee` to at least (equivalent of): sum of fees from (2)
 /// to (4) above, plus some interest for the relayer.
 pub trait MessageDeliveryAndDispatchPayment<AccountId, Balance> {
 	/// Error type.
@@ -121,27 +127,98 @@ pub trait MessageDeliveryAndDispatchPayment<AccountId, Balance> {
 	/// The implementation may also choose to pay reward to the `confirmation_relayer`, which is
 	/// a relayer that has submitted delivery confirmation transaction.
 	fn pay_relayers_rewards(
+		lane_id: LaneId,
+		messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
 		confirmation_relayer: &AccountId,
-		relayers_rewards: RelayersRewards<AccountId, Balance>,
+		received_range: &RangeInclusive<MessageNonce>,
 		relayer_fund_account: &AccountId,
 	);
+}
+
+/// Send message artifacts.
+#[derive(RuntimeDebug, PartialEq)]
+pub struct SendMessageArtifacts {
+	/// Nonce of the message.
+	pub nonce: MessageNonce,
+	/// Actual weight of send message call.
+	pub weight: Weight,
+}
 
-	/// Perform some initialization in externalities-provided environment.
+/// Messages bridge API to be used from other pallets.
+pub trait MessagesBridge<AccountId, Balance, Payload> {
+	/// Error type.
+	type Error: Debug;
+
+	/// Send message over the bridge.
 	///
-	/// For instance you may ensure that particular required accounts or storage items are present.
-	/// Returns the number of storage reads performed.
-	fn initialize(_relayer_fund_account: &AccountId) -> usize {
-		0
+	/// Returns unique message nonce or error if send has failed.
+	fn send_message(
+		sender: Sender<AccountId>,
+		lane: LaneId,
+		message: Payload,
+		delivery_and_dispatch_fee: Balance,
+	) -> Result<SendMessageArtifacts, Self::Error>;
+}
+
+/// Bridge that does nothing when message is being sent.
+#[derive(RuntimeDebug, PartialEq)]
+pub struct NoopMessagesBridge;
+
+impl<AccountId, Balance, Payload> MessagesBridge<AccountId, Balance, Payload>
+	for NoopMessagesBridge
+{
+	type Error = &'static str;
+
+	fn send_message(
+		_sender: Sender<AccountId>,
+		_lane: LaneId,
+		_message: Payload,
+		_delivery_and_dispatch_fee: Balance,
+	) -> Result<SendMessageArtifacts, Self::Error> {
+		Ok(SendMessageArtifacts { nonce: 0, weight: 0 })
 	}
 }
 
 /// Handler for messages delivery confirmation.
-#[impl_trait_for_tuples::impl_for_tuples(30)]
 pub trait OnDeliveryConfirmed {
 	/// Called when we receive confirmation that our messages have been delivered to the
 	/// target chain. The confirmation also has single bit dispatch result for every
-	/// confirmed message (see `DeliveredMessages` for details).
-	fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) {}
+	/// confirmed message (see `DeliveredMessages` for details). Guaranteed to be called
+	/// only when at least one message is delivered.
+	///
+	/// Should return total weight consumed by the call.
+	///
+	/// NOTE: messages pallet assumes that maximal weight that may be spent on processing
+	/// single message is single DB read + single DB write. So this function shall never
+	/// return a weight that is larger than the total number of messages * (db read + db write).
+	/// If your pallet needs more time for processing single message, please do it
+	/// from `on_initialize` call(s) of the next block(s).
+	fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) -> Weight;
+}
+
+#[impl_trait_for_tuples::impl_for_tuples(30)]
+impl OnDeliveryConfirmed for Tuple {
+	fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight {
+		let mut total_weight: Weight = 0;
+		for_tuples!(
+			#(
+				total_weight = total_weight.saturating_add(Tuple::on_messages_delivered(lane, messages));
+			)*
+		);
+		total_weight
+	}
+}
+
+/// Handler for messages that have been accepted.
+pub trait OnMessageAccepted {
+	/// Called when a message has been accepted by the message pallet.
+	fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight;
+}
+
+impl OnMessageAccepted for () {
+	fn on_messages_accepted(_lane: &LaneId, _message: &MessageNonce) -> Weight {
+		0
+	}
 }
 
 /// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and
@@ -149,7 +226,8 @@ pub trait OnDeliveryConfirmed {
 pub struct ForbidOutboundMessages;
 
 /// Error message that is used in `ForbidOutboundMessages` implementation.
-const ALL_OUTBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all outbound messages";
+const ALL_OUTBOUND_MESSAGES_REJECTED: &str =
+	"This chain is configured to reject all outbound messages";
 
 impl<Payload, AccountId> TargetHeaderChain<Payload, AccountId> for ForbidOutboundMessages {
 	type Error = &'static str;
@@ -167,7 +245,9 @@ impl<Payload, AccountId> TargetHeaderChain<Payload, AccountId> for ForbidOutboun
 	}
 }
 
-impl<Submitter, Payload, Fee> LaneMessageVerifier<Submitter, Payload, Fee> for ForbidOutboundMessages {
+impl<Submitter, Payload, Fee> LaneMessageVerifier<Submitter, Payload, Fee>
+	for ForbidOutboundMessages
+{
 	type Error = &'static str;
 
 	fn verify_message(
@@ -181,7 +261,9 @@ impl<Submitter, Payload, Fee> LaneMessageVerifier<Submitter, Payload, Fee> for F
 	}
 }
 
-impl<AccountId, Balance> MessageDeliveryAndDispatchPayment<AccountId, Balance> for ForbidOutboundMessages {
+impl<AccountId, Balance> MessageDeliveryAndDispatchPayment<AccountId, Balance>
+	for ForbidOutboundMessages
+{
 	type Error = &'static str;
 
 	fn pay_delivery_and_dispatch_fee(
@@ -193,8 +275,10 @@ impl<AccountId, Balance> MessageDeliveryAndDispatchPayment<AccountId, Balance> f
 	}
 
 	fn pay_relayers_rewards(
+		_lane_id: LaneId,
+		_messages_relayers: VecDeque<UnrewardedRelayer<AccountId>>,
 		_confirmation_relayer: &AccountId,
-		_relayers_rewards: RelayersRewards<AccountId, Balance>,
+		_received_range: &RangeInclusive<MessageNonce>,
 		_relayer_fund_account: &AccountId,
 	) {
 	}
diff --git a/polkadot/bridges/primitives/messages/src/target_chain.rs b/polkadot/bridges/primitives/messages/src/target_chain.rs
index 8730597637918b39b2fdc85056eb8a0cf09cced9..a84ea7af907de6cb279bda58823e69ace22ad0cd 100644
--- a/polkadot/bridges/primitives/messages/src/target_chain.rs
+++ b/polkadot/bridges/primitives/messages/src/target_chain.rs
@@ -76,7 +76,7 @@ pub trait SourceHeaderChain<Fee> {
 	/// messages will be rejected.
 	///
 	/// The `messages_count` argument verification (sane limits) is supposed to be made
-	/// outside of this function. This function only verifies that the proof declares exactly
+	/// outside this function. This function only verifies that the proof declares exactly
 	/// `messages_count` messages.
 	fn verify_messages_proof(
 		proof: Self::MessagesProof,
@@ -112,23 +112,19 @@ pub trait MessageDispatch<AccountId, Fee> {
 
 impl<Message> Default for ProvedLaneMessages<Message> {
 	fn default() -> Self {
-		ProvedLaneMessages {
-			lane_state: None,
-			messages: Vec::new(),
-		}
+		ProvedLaneMessages { lane_state: None, messages: Vec::new() }
 	}
 }
 
 impl<DispatchPayload: Decode, Fee> From<Message<Fee>> for DispatchMessage<DispatchPayload, Fee> {
 	fn from(message: Message<Fee>) -> Self {
-		DispatchMessage {
-			key: message.key,
-			data: message.data.into(),
-		}
+		DispatchMessage { key: message.key, data: message.data.into() }
 	}
 }
 
-impl<DispatchPayload: Decode, Fee> From<MessageData<Fee>> for DispatchMessageData<DispatchPayload, Fee> {
+impl<DispatchPayload: Decode, Fee> From<MessageData<Fee>>
+	for DispatchMessageData<DispatchPayload, Fee>
+{
 	fn from(data: MessageData<Fee>) -> Self {
 		DispatchMessageData {
 			payload: DispatchPayload::decode(&mut &data.payload[..]),
@@ -142,7 +138,8 @@ impl<DispatchPayload: Decode, Fee> From<MessageData<Fee>> for DispatchMessageDat
 pub struct ForbidInboundMessages;
 
 /// Error message that is used in `ForbidOutboundMessages` implementation.
-const ALL_INBOUND_MESSAGES_REJECTED: &str = "This chain is configured to reject all inbound messages";
+const ALL_INBOUND_MESSAGES_REJECTED: &str =
+	"This chain is configured to reject all inbound messages";
 
 impl<Fee> SourceHeaderChain<Fee> for ForbidInboundMessages {
 	type Error = &'static str;
@@ -163,7 +160,10 @@ impl<AccountId, Fee> MessageDispatch<AccountId, Fee> for ForbidInboundMessages {
 		Weight::MAX
 	}
 
-	fn dispatch(_: &AccountId, _: DispatchMessage<Self::DispatchPayload, Fee>) -> MessageDispatchResult {
+	fn dispatch(
+		_: &AccountId,
+		_: DispatchMessage<Self::DispatchPayload, Fee>,
+	) -> MessageDispatchResult {
 		MessageDispatchResult {
 			dispatch_result: false,
 			unspent_weight: 0,
diff --git a/polkadot/bridges/primitives/polkadot-core/Cargo.toml b/polkadot/bridges/primitives/polkadot-core/Cargo.toml
index 5e95c223ce2ec8c7986d93c8375c6cabf270a8a8..f05edd0d91ba3cfb68927ebbae037fcf1c71d6a3 100644
--- a/polkadot/bridges/primitives/polkadot-core/Cargo.toml
+++ b/polkadot/bridges/primitives/polkadot-core/Cargo.toml
@@ -7,7 +7,7 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-parity-scale-codec = { version = "2.0.0", default-features = false, features = ["derive"] }
+parity-scale-codec = { version = "2.2.0", default-features = false, features = ["derive"] }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Bridge Dependencies
@@ -17,9 +17,9 @@ bp-runtime = { path = "../runtime", default-features = false }
 
 # Substrate Based Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
diff --git a/polkadot/bridges/primitives/polkadot-core/src/lib.rs b/polkadot/bridges/primitives/polkadot-core/src/lib.rs
index 151e374e6603f085e307525c94ce7ae92c54b2b4..38e43d312b5d48412ee8bbf79f89bb20924bd1a7 100644
--- a/polkadot/bridges/primitives/polkadot-core/src/lib.rs
+++ b/polkadot/bridges/primitives/polkadot-core/src/lib.rs
@@ -29,6 +29,7 @@ use frame_support::{
 };
 use frame_system::limits;
 use parity_scale_codec::Compact;
+use scale_info::{StaticTypeInfo, TypeInfo};
 use sp_core::Hasher as HasherT;
 use sp_runtime::{
 	generic,
@@ -66,18 +67,19 @@ pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024;
 /// All polkadot-like chains are using same crypto.
 pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32;
 
-/// All Polkadot-like chains allow normal extrinsics to fill block up to 75%.
+/// All Polkadot-like chains allow normal extrinsics to fill block up to 75 percent.
 ///
 /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
 const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
 
-/// All Polkadot-like chains allow 2 seconds of compute with a 6 second average block time.
+/// All Polkadot-like chains allow 2 seconds of compute with a 6-second average block time.
 ///
 /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
 pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND;
 
-/// All Polkadot-like chains assume that an on-initialize consumes 1% of the weight on average,
-/// hence a single extrinsic will not be allowed to consume more than `AvailableBlockRatio - 1%`.
+/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on
+/// average, hence a single extrinsic will not be allowed to consume more than
+/// `AvailableBlockRatio - 1 percent`.
 ///
 /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate.
 pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1);
@@ -113,7 +115,8 @@ parameter_types! {
 		.build_or_panic();
 }
 
-/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can use.
+/// Get the maximum weight (compute time) that a Normal extrinsic on the Polkadot-like chain can
+/// use.
 pub fn max_extrinsic_weight() -> Weight {
 	BlockWeights::get()
 		.get(DispatchClass::Normal)
@@ -138,6 +141,48 @@ pub const MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE: MessageNonce = 128;
 /// Maximal number of unconfirmed messages at inbound lane.
 pub const MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE: MessageNonce = 8192;
 
+// One important thing about weight-related constants here is that actually we may have
+// different weights on different Polkadot-like chains. But now all deployments are
+// almost the same, so we're exporting constants from this crate.
+
+/// Maximal weight of single message delivery confirmation transaction on Polkadot-like chain.
+///
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof`
+/// weight formula computation for the case when single message is confirmed. The result then must
+/// be rounded up to account possible future runtime upgrades.
+pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000;
+
+/// Increase of delivery transaction weight on Polkadot-like chain with every additional message
+/// byte.
+///
+/// This value is a result of
+/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then
+/// must be rounded up to account for possible future runtime upgrades.
+pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000;
+
+/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded
+/// call itself.
+///
+/// Can be computed by subtracting encoded call size from raw transaction size.
+pub const TX_EXTRA_BYTES: u32 = 256;
+
+/// Weight of single regular message delivery transaction on Polkadot-like chain.
+///
+/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call
+/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`
+/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be
+/// rounded up to account for possible future runtime upgrades.
+pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000;
+
+/// Weight of pay-dispatch-fee operation for inbound messages at Polkadot-like chain.
+///
+/// This value corresponds to the result of
+/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your
+/// chain. Don't put too much reserve there, because it is used to **decrease**
+/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting a large reserve would make delivery
+/// transactions cheaper.
+pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000;
+
 /// Re-export `time_units` to make usage easier.
 pub use time_units::*;
 
@@ -165,7 +210,7 @@ pub type Index = u32;
 /// Hashing type.
 pub type Hashing = BlakeTwo256;
 
-/// The type of an object that can produce hashes on Polkadot-like chains.
+/// The type of object that can produce hashes on Polkadot-like chains.
 pub type Hasher = BlakeTwo256;
 
 /// The header type used by Polkadot-like chains.
@@ -180,6 +225,9 @@ pub type AccountPublic = <Signature as Verify>::Signer;
 /// Id of account on Polkadot-like chains.
 pub type AccountId = <AccountPublic as IdentifyAccount>::AccountId;
 
+/// Address of account on Polkadot-like chains.
+pub type AccountAddress = MultiAddress<AccountId, ()>;
+
 /// Index of a transaction on the Polkadot-like chains.
 pub type Nonce = u32;
 
@@ -194,18 +242,13 @@ pub type Balance = u128;
 
 /// Unchecked Extrinsic type.
 pub type UncheckedExtrinsic<Call> =
-	generic::UncheckedExtrinsic<MultiAddress<AccountId, ()>, Call, Signature, SignedExtensions<Call>>;
+	generic::UncheckedExtrinsic<AccountAddress, Call, Signature, SignedExtensions<Call>>;
+
+/// Account address, used by the Polkadot-like chain.
+pub type Address = MultiAddress<AccountId, ()>;
 
 /// A type of the data encoded as part of the transaction.
-pub type SignedExtra = (
-	(),
-	(),
-	(),
-	sp_runtime::generic::Era,
-	Compact<Nonce>,
-	(),
-	Compact<Balance>,
-);
+pub type SignedExtra = ((), (), (), sp_runtime::generic::Era, Compact<Nonce>, (), Compact<Balance>);
 
 /// Parameters which are part of the payload used to produce transaction signature,
 /// but don't end up in the transaction itself (i.e. inherent part of the runtime).
@@ -213,7 +256,7 @@ pub type AdditionalSigned = (u32, u32, Hash, Hash, (), (), ());
 
 /// A simplified version of signed extensions meant for producing signed transactions
 /// and signed payload in the client code.
-#[derive(PartialEq, Eq, Clone, RuntimeDebug, scale_info::TypeInfo)]
+#[derive(PartialEq, Eq, Clone, RuntimeDebug, TypeInfo)]
 pub struct SignedExtensions<Call> {
 	encode_payload: SignedExtra,
 	additional_signed: AdditionalSigned,
@@ -227,7 +270,9 @@ impl<Call> parity_scale_codec::Encode for SignedExtensions<Call> {
 }
 
 impl<Call> parity_scale_codec::Decode for SignedExtensions<Call> {
-	fn decode<I: parity_scale_codec::Input>(_input: &mut I) -> Result<Self, parity_scale_codec::Error> {
+	fn decode<I: parity_scale_codec::Input>(
+		_input: &mut I,
+	) -> Result<Self, parity_scale_codec::Error> {
 		unimplemented!("SignedExtensions are never meant to be decoded, they are only used to create transaction");
 	}
 }
@@ -235,26 +280,26 @@ impl<Call> parity_scale_codec::Decode for SignedExtensions<Call> {
 impl<Call> SignedExtensions<Call> {
 	pub fn new(
 		version: sp_version::RuntimeVersion,
-		era: sp_runtime::generic::Era,
+		era: bp_runtime::TransactionEraOf<PolkadotLike>,
 		genesis_hash: Hash,
 		nonce: Nonce,
 		tip: Balance,
 	) -> Self {
 		Self {
 			encode_payload: (
-				(),           // spec version
-				(),           // tx version
-				(),           // genesis
-				era,          // era
-				nonce.into(), // nonce (compact encoding)
-				(),           // Check weight
-				tip.into(),   // transaction payment / tip (compact encoding)
+				(),              // spec version
+				(),              // tx version
+				(),              // genesis
+				era.frame_era(), // era
+				nonce.into(),    // nonce (compact encoding)
+				(),              // Check weight
+				tip.into(),      // transaction payment / tip (compact encoding)
 			),
 			additional_signed: (
 				version.spec_version,
 				version.transaction_version,
 				genesis_hash,
-				genesis_hash,
+				era.signed_payload(genesis_hash),
 				(),
 				(),
 				(),
@@ -264,6 +309,18 @@ impl<Call> SignedExtensions<Call> {
 	}
 }
 
+impl<Call> SignedExtensions<Call> {
+	/// Return signer nonce, used to craft transaction.
+	pub fn nonce(&self) -> Nonce {
+		self.encode_payload.4.into()
+	}
+
+	/// Return transaction tip.
+	pub fn tip(&self) -> Balance {
+		self.encode_payload.6.into()
+	}
+}
+
 impl<Call> sp_runtime::traits::SignedExtension for SignedExtensions<Call>
 where
 	Call: parity_scale_codec::Codec
@@ -273,7 +330,7 @@ where
 		+ Clone
 		+ Eq
 		+ PartialEq
-		+ scale_info::StaticTypeInfo,
+		+ StaticTypeInfo,
 	Call: Dispatchable,
 {
 	const IDENTIFIER: &'static str = "Not needed.";
@@ -283,7 +340,9 @@ where
 	type AdditionalSigned = AdditionalSigned;
 	type Pre = ();
 
-	fn additional_signed(&self) -> Result<Self::AdditionalSigned, frame_support::unsigned::TransactionValidityError> {
+	fn additional_signed(
+		&self,
+	) -> Result<Self::AdditionalSigned, frame_support::unsigned::TransactionValidityError> {
 		Ok(self.additional_signed)
 	}
 }
@@ -297,6 +356,11 @@ impl Chain for PolkadotLike {
 	type Hash = Hash;
 	type Hasher = Hasher;
 	type Header = Header;
+
+	type AccountId = AccountId;
+	type Balance = Balance;
+	type Index = Index;
+	type Signature = Signature;
 }
 
 /// Convert a 256-bit hash into an AccountId.
@@ -311,7 +375,7 @@ impl Convert<sp_core::H256, AccountId> for AccountIdConverter {
 /// Return a storage key for account data.
 ///
 /// This is based on FRAME storage-generation code from Substrate:
-/// https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74
+/// [link](https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74)
 /// The equivalent command to invoke in case full `Runtime` is known is this:
 /// `let key = frame_system::Account::<Runtime>::storage_map_final_key(&account_id);`
 pub fn account_info_storage_key(id: &AccountId) -> Vec<u8> {
@@ -319,7 +383,9 @@ pub fn account_info_storage_key(id: &AccountId) -> Vec<u8> {
 	let storage_prefix_hashed = Twox128::hash(b"Account");
 	let key_hashed = parity_scale_codec::Encode::using_encoded(id, Blake2_128Concat::hash);
 
-	let mut final_key = Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len());
+	let mut final_key = Vec::with_capacity(
+		module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(),
+	);
 
 	final_key.extend_from_slice(&module_prefix_hashed[..]);
 	final_key.extend_from_slice(&storage_prefix_hashed[..]);
@@ -347,8 +413,8 @@ mod tests {
 	#[test]
 	fn should_generate_storage_key() {
 		let acc = [
-			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
-			30, 31, 32,
+			1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+			25, 26, 27, 28, 29, 30, 31, 32,
 		]
 		.into();
 		let key = account_info_storage_key(&acc);
diff --git a/polkadot/bridges/primitives/runtime/Cargo.toml b/polkadot/bridges/primitives/runtime/Cargo.toml
index cdd47e30d3f8ca313dc9488b8ac4c17e05c86d84..944f84a6c683545b78928575a5d4c0896ef3ee97 100644
--- a/polkadot/bridges/primitives/runtime/Cargo.toml
+++ b/polkadot/bridges/primitives/runtime/Cargo.toml
@@ -7,24 +7,20 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 hash-db = { version = "0.15.2", default-features = false }
 num-traits = { version = "0.2", default-features = false }
 scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Substrate Dependencies
 
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" , default-features = false }
-
-[dev-dependencies]
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
 
 [features]
 default = ["std"]
diff --git a/polkadot/bridges/primitives/runtime/src/chain.rs b/polkadot/bridges/primitives/runtime/src/chain.rs
index cb19c6e72681d8a1d6efd3fea0ed2f66910f6445..e24694bf8b0f86ea1c34f3f3a6c0e13eb0a61504 100644
--- a/polkadot/bridges/primitives/runtime/src/chain.rs
+++ b/polkadot/bridges/primitives/runtime/src/chain.rs
@@ -15,12 +15,15 @@
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
 use frame_support::Parameter;
-use num_traits::AsPrimitive;
-use sp_runtime::traits::{
-	AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerializeDeserialize,
-	Member, SimpleBitOps,
+use num_traits::{AsPrimitive, Bounded, CheckedSub, SaturatingAdd, Zero};
+use sp_runtime::{
+	traits::{
+		AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay,
+		MaybeMallocSizeOf, MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify,
+	},
+	FixedPointOperand,
 };
-use sp_std::str::FromStr;
+use sp_std::{convert::TryFrom, fmt::Debug, hash::Hash, str::FromStr};
 
 /// Minimal Substrate-based chain representation that may be used from no_std environment.
 pub trait Chain: Send + Sync + 'static {
@@ -34,7 +37,7 @@ pub trait Chain: Send + Sync + 'static {
 	type BlockNumber: Parameter
 		+ Member
 		+ MaybeSerializeDeserialize
-		+ sp_std::hash::Hash
+		+ Hash
 		+ Copy
 		+ Default
 		+ MaybeDisplay
@@ -42,7 +45,10 @@ pub trait Chain: Send + Sync + 'static {
 		+ FromStr
 		+ MaybeMallocSizeOf
 		+ AsPrimitive<usize>
-		+ Default;
+		+ Default
+		// original `sp_runtime::traits::Header::BlockNumber` doesn't have this trait, but
+		// `sp_runtime::generic::Era` requires block number -> `u64` conversion.
+		+ Into<u64>;
 
 	/// A type that fulfills the abstract idea of what a Substrate hash is.
 	// Constraits come from the associated Hash type of `sp_runtime::traits::Header`
@@ -51,7 +57,7 @@ pub trait Chain: Send + Sync + 'static {
 	type Hash: Parameter
 		+ Member
 		+ MaybeSerializeDeserialize
-		+ sp_std::hash::Hash
+		+ Hash
 		+ Ord
 		+ Copy
 		+ MaybeDisplay
@@ -71,7 +77,48 @@ pub trait Chain: Send + Sync + 'static {
 	/// A type that fulfills the abstract idea of what a Substrate header is.
 	// See here for more info:
 	// https://crates.parity.io/sp_runtime/traits/trait.Header.html
-	type Header: Parameter + HeaderT<Number = Self::BlockNumber, Hash = Self::Hash> + MaybeSerializeDeserialize;
+	type Header: Parameter
+		+ HeaderT<Number = Self::BlockNumber, Hash = Self::Hash>
+		+ MaybeSerializeDeserialize;
+
+	/// The user account identifier type for the runtime.
+	type AccountId: Parameter
+		+ Member
+		+ MaybeSerializeDeserialize
+		+ Debug
+		+ MaybeDisplay
+		+ Ord
+		+ Default;
+	/// Balance of an account in native tokens.
+	///
+	/// The chain may support multiple tokens, but this particular type is for the token that is used
+	/// to pay for transaction dispatch, to reward different relayers (headers, messages), etc.
+	type Balance: AtLeast32BitUnsigned
+		+ FixedPointOperand
+		+ Parameter
+		+ Parameter
+		+ Member
+		+ MaybeSerializeDeserialize
+		+ Clone
+		+ Copy
+		+ Bounded
+		+ CheckedSub
+		+ PartialOrd
+		+ SaturatingAdd
+		+ Zero
+		+ TryFrom<sp_core::U256>;
+	/// Index of a transaction used by the chain.
+	type Index: Parameter
+		+ Member
+		+ MaybeSerialize
+		+ Debug
+		+ Default
+		+ MaybeDisplay
+		+ MaybeSerializeDeserialize
+		+ AtLeast32Bit
+		+ Copy;
+	/// Signature type, used on this chain.
+	type Signature: Parameter + Verify;
 }
 
 /// Block number used by the chain.
@@ -85,3 +132,21 @@ pub type HasherOf<C> = <C as Chain>::Hasher;
 
 /// Header type used by the chain.
 pub type HeaderOf<C> = <C as Chain>::Header;
+
+/// Account id type used by the chain.
+pub type AccountIdOf<C> = <C as Chain>::AccountId;
+
+/// Balance type used by the chain.
+pub type BalanceOf<C> = <C as Chain>::Balance;
+
+/// Transaction index type used by the chain.
+pub type IndexOf<C> = <C as Chain>::Index;
+
+/// Signature type used by the chain.
+pub type SignatureOf<C> = <C as Chain>::Signature;
+
+/// Account public type used by the chain.
+pub type AccountPublicOf<C> = <SignatureOf<C> as Verify>::Signer;
+
+/// Transaction era used by the chain.
+pub type TransactionEraOf<C> = crate::TransactionEra<BlockNumberOf<C>, HashOf<C>>;
diff --git a/polkadot/bridges/primitives/runtime/src/lib.rs b/polkadot/bridges/primitives/runtime/src/lib.rs
index a4bb400a93c45f273c92ef250001a9cb416f2649..460f1b19dfe3f0b247a58ce1ab57778a883261be 100644
--- a/polkadot/bridges/primitives/runtime/src/lib.rs
+++ b/polkadot/bridges/primitives/runtime/src/lib.rs
@@ -19,11 +19,16 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use codec::Encode;
-use sp_core::hash::H256;
+use frame_support::{RuntimeDebug, StorageHasher};
+use sp_core::{hash::H256, storage::StorageKey};
 use sp_io::hashing::blake2_256;
-use sp_std::convert::TryFrom;
+use sp_std::{convert::TryFrom, vec::Vec};
 
-pub use chain::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf};
+pub use chain::{
+	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf,
+	IndexOf, SignatureOf, TransactionEraOf,
+};
+pub use frame_support::storage::storage_prefix as storage_value_final_key;
 pub use storage_proof::{Error as StorageProofError, StorageProofChecker};
 
 #[cfg(feature = "std")]
@@ -64,19 +69,24 @@ pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/
 /// A unique prefix for entropy when generating a cross-chain account ID for the Root account.
 pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root";
 
+/// Generic header Id.
+#[derive(RuntimeDebug, Default, Clone, Copy, Eq, Hash, PartialEq)]
+pub struct HeaderId<Hash, Number>(pub Number, pub Hash);
+
 /// Unique identifier of the chain.
 ///
 /// In addition to its main function (identifying the chain), this type may also be used to
 /// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g.
-/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and Chain2.
-/// Sometimes we need to be able to identify deployed instance dynamically. This type may be used for that.
+/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and
+/// Chain2. Sometimes we need to be able to identify deployed instance dynamically. This type may be
+/// used for that.
 pub type ChainId = [u8; 4];
 
 /// Type of accounts on the source chain.
 pub enum SourceAccount<T> {
 	/// An account that belongs to Root (privileged origin).
 	Root,
-	/// A non-priviledged account.
+	/// A non-privileged account.
 	///
 	/// The embedded account ID may or may not have a private key depending on the "owner" of the
 	/// account (private key, pallet, proxy, etc.).
@@ -99,8 +109,10 @@ where
 	AccountId: Encode,
 {
 	match id {
-		SourceAccount::Root => (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256),
-		SourceAccount::Account(id) => (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256),
+		SourceAccount::Root =>
+			(ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256),
+		SourceAccount::Account(id) =>
+			(ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256),
 	}
 	.into()
 }
@@ -109,8 +121,8 @@ where
 ///
 /// This account is used to collect fees for relayers that are passing messages across the bridge.
 ///
-/// The account ID can be the same across different instances of `pallet-bridge-messages` if the same
-/// `bridge_id` is used.
+/// The account ID can be the same across different instances of `pallet-bridge-messages` if the
+/// same `bridge_id` is used.
 pub fn derive_relayer_fund_account_id(bridge_id: ChainId) -> H256 {
 	("relayer-fund-account", bridge_id).using_encoded(blake2_256).into()
 }
@@ -124,6 +136,12 @@ pub trait Size {
 	fn size_hint(&self) -> u32;
 }
 
+impl Size for &[u8] {
+	fn size_hint(&self) -> u32 {
+		self.len() as _
+	}
+}
+
 impl Size for () {
 	fn size_hint(&self) -> u32 {
 		0
@@ -138,3 +156,110 @@ impl Size for PreComputedSize {
 		u32::try_from(self.0).unwrap_or(u32::MAX)
 	}
 }
+
+/// Era of specific transaction.
+#[derive(RuntimeDebug, Clone, Copy)]
+pub enum TransactionEra<BlockNumber, BlockHash> {
+	/// Transaction is immortal.
+	Immortal,
+	/// Transaction is valid for a given number of blocks, starting from given block.
+	Mortal(HeaderId<BlockHash, BlockNumber>, u32),
+}
+
+impl<BlockNumber: Copy + Into<u64>, BlockHash: Copy> TransactionEra<BlockNumber, BlockHash> {
+	/// Prepare transaction era, based on mortality period and current best block number.
+	pub fn new(
+		best_block_id: HeaderId<BlockHash, BlockNumber>,
+		mortality_period: Option<u32>,
+	) -> Self {
+		mortality_period
+			.map(|mortality_period| TransactionEra::Mortal(best_block_id, mortality_period))
+			.unwrap_or(TransactionEra::Immortal)
+	}
+
+	/// Create new immortal transaction era.
+	pub fn immortal() -> Self {
+		TransactionEra::Immortal
+	}
+
+	/// Returns era that is used by FRAME-based runtimes.
+	pub fn frame_era(&self) -> sp_runtime::generic::Era {
+		match *self {
+			TransactionEra::Immortal => sp_runtime::generic::Era::immortal(),
+			TransactionEra::Mortal(header_id, period) =>
+				sp_runtime::generic::Era::mortal(period as _, header_id.0.into()),
+		}
+	}
+
+	/// Returns header hash that needs to be included in the signature payload.
+	pub fn signed_payload(&self, genesis_hash: BlockHash) -> BlockHash {
+		match *self {
+			TransactionEra::Immortal => genesis_hash,
+			TransactionEra::Mortal(header_id, _) => header_id.1,
+		}
+	}
+}
+
+/// This is a copy of the
+/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for `Blake2_128Concat`
+/// maps.
+///
+/// We're using it because to call `storage_map_final_key` directly, we need access to the runtime
+/// and pallet instance, which (sometimes) is impossible.
+pub fn storage_map_final_key_blake2_128concat(
+	pallet_prefix: &str,
+	map_name: &str,
+	key: &[u8],
+) -> StorageKey {
+	storage_map_final_key_identity(
+		pallet_prefix,
+		map_name,
+		&frame_support::Blake2_128Concat::hash(key),
+	)
+}
+
+///
+pub fn storage_map_final_key_twox64_concat(
+	pallet_prefix: &str,
+	map_name: &str,
+	key: &[u8],
+) -> StorageKey {
+	storage_map_final_key_identity(pallet_prefix, map_name, &frame_support::Twox64Concat::hash(key))
+}
+
+/// This is a copy of the
+/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for `Identity` maps.
+///
+/// We're using it because to call `storage_map_final_key` directly, we need access to the runtime
+/// and pallet instance, which (sometimes) is impossible.
+pub fn storage_map_final_key_identity(
+	pallet_prefix: &str,
+	map_name: &str,
+	key_hashed: &[u8],
+) -> StorageKey {
+	let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes());
+	let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes());
+
+	let mut final_key = Vec::with_capacity(
+		pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(),
+	);
+
+	final_key.extend_from_slice(&pallet_prefix_hashed[..]);
+	final_key.extend_from_slice(&storage_prefix_hashed[..]);
+	final_key.extend_from_slice(key_hashed.as_ref());
+
+	StorageKey(final_key)
+}
+
+/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false;
+/// }`) is computed.
+///
+/// Copied from `frame_support::parameter_types` macro
+pub fn storage_parameter_key(parameter_name: &str) -> StorageKey {
+	let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1 + 1);
+	buffer.push(b':');
+	buffer.extend_from_slice(parameter_name.as_bytes());
+	buffer.push(b':');
+	buffer.push(0);
+	StorageKey(sp_io::hashing::twox_128(&buffer).to_vec())
+}
diff --git a/polkadot/bridges/primitives/runtime/src/messages.rs b/polkadot/bridges/primitives/runtime/src/messages.rs
index 3ab867773f9f67a837a1035629ecd71eb40ac30b..7a6687c18b776e311dccab8a929abedfd7310f8e 100644
--- a/polkadot/bridges/primitives/runtime/src/messages.rs
+++ b/polkadot/bridges/primitives/runtime/src/messages.rs
@@ -23,7 +23,7 @@ use scale_info::TypeInfo;
 /// Where message dispatch fee is paid?
 #[derive(Encode, Decode, RuntimeDebug, Clone, Copy, PartialEq, Eq, TypeInfo)]
 pub enum DispatchFeePayment {
-	/// The dispacth fee is paid at the source chain.
+	/// The dispatch fee is paid at the source chain.
 	AtSourceChain,
 	/// The dispatch fee is paid at the target chain.
 	///
@@ -51,7 +51,7 @@ pub struct MessageDispatchResult {
 	/// 2) if message has not been dispatched at all.
 	pub unspent_weight: Weight,
 	/// Whether the message dispatch fee has been paid during dispatch. This will be true if your
-	/// configuration supports pay-dispatch-fee-at-target-chain option and message sender has enabled
-	/// this option.
+	/// configuration supports pay-dispatch-fee-at-target-chain option and message sender has
+	/// enabled this option.
 	pub dispatch_fee_paid_during_dispatch: bool,
 }
diff --git a/polkadot/bridges/primitives/runtime/src/storage_proof.rs b/polkadot/bridges/primitives/runtime/src/storage_proof.rs
index d70be93b1d2513648011351be1bcfd854095325b..9cc5b48ebd913319e4be1f29be5ea8dbcb268e60 100644
--- a/polkadot/bridges/primitives/runtime/src/storage_proof.rs
+++ b/polkadot/bridges/primitives/runtime/src/storage_proof.rs
@@ -42,7 +42,7 @@ where
 	pub fn new(root: H::Out, proof: StorageProof) -> Result<Self, Error> {
 		let db = proof.into_memory_db();
 		if !db.contains(&root, EMPTY_PREFIX) {
-			return Err(Error::StorageRootMismatch);
+			return Err(Error::StorageRootMismatch)
 		}
 
 		let checker = StorageProofChecker { root, db };
@@ -52,7 +52,8 @@ where
 	/// Reads a value from the available subset of storage. If the value cannot be read due to an
 	/// incomplete or otherwise invalid proof, this returns an error.
 	pub fn read_value(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
-		read_trie_value::<Layout<H>, _>(&self.db, &self.root, key).map_err(|_| Error::StorageValueUnavailable)
+		read_trie_value::<Layout<H>, _>(&self.db, &self.root, key)
+			.map_err(|_| Error::StorageValueUnavailable)
 	}
 }
 
@@ -97,7 +98,8 @@ pub mod tests {
 		let (root, proof) = craft_valid_storage_proof();
 
 		// check proof in runtime
-		let checker = <StorageProofChecker<sp_core::Blake2Hasher>>::new(root, proof.clone()).unwrap();
+		let checker =
+			<StorageProofChecker<sp_core::Blake2Hasher>>::new(root, proof.clone()).unwrap();
 		assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec())));
 		assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec())));
 		assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable));
diff --git a/polkadot/bridges/primitives/test-utils/Cargo.toml b/polkadot/bridges/primitives/test-utils/Cargo.toml
index 14d3c031b4c8d4fa22952408c5518f4d4922c4e0..95121c60993a7e675f56fdb92cc6021f5b4b7761 100644
--- a/polkadot/bridges/primitives/test-utils/Cargo.toml
+++ b/polkadot/bridges/primitives/test-utils/Cargo.toml
@@ -7,7 +7,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
 bp-header-chain = { path = "../header-chain", default-features = false  }
-codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+codec = { package = "parity-scale-codec", version = "2.2.0", default-features = false }
 ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] }
 finality-grandpa = { version = "0.14.4", default-features = false }
 sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
diff --git a/polkadot/bridges/primitives/test-utils/src/keyring.rs b/polkadot/bridges/primitives/test-utils/src/keyring.rs
index b83678cae5e5196033e2c94a982945a7c6d3b857..059d6eb5be4f1e910679eab6e1a7db727fbcb506 100644
--- a/polkadot/bridges/primitives/test-utils/src/keyring.rs
+++ b/polkadot/bridges/primitives/test-utils/src/keyring.rs
@@ -45,7 +45,8 @@ impl Account {
 		let data = self.0.encode();
 		let mut bytes = [0_u8; 32];
 		bytes[0..data.len()].copy_from_slice(&*data);
-		SecretKey::from_bytes(&bytes).expect("A static array of the correct length is a known good.")
+		SecretKey::from_bytes(&bytes)
+			.expect("A static array of the correct length is a known good.")
 	}
 
 	pub fn pair(&self) -> Keypair {
@@ -57,7 +58,8 @@ impl Account {
 		let public = self.public();
 		pair[32..].copy_from_slice(&public.to_bytes());
 
-		Keypair::from_bytes(&pair).expect("We expect the SecretKey to be good, so this must also be good.")
+		Keypair::from_bytes(&pair)
+			.expect("We expect the SecretKey to be good, so this must also be good.")
 	}
 
 	pub fn sign(&self, msg: &[u8]) -> Signature {
@@ -79,10 +81,7 @@ pub fn voter_set() -> VoterSet<AuthorityId> {
 
 /// Convenience function to get a list of Grandpa authorities.
 pub fn authority_list() -> AuthorityList {
-	test_keyring()
-		.iter()
-		.map(|(id, w)| (AuthorityId::from(*id), *w))
-		.collect()
+	test_keyring().iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect()
 }
 
 /// Get the corresponding identities from the keyring for the "standard" authority set.
diff --git a/polkadot/bridges/primitives/test-utils/src/lib.rs b/polkadot/bridges/primitives/test-utils/src/lib.rs
index 64109754086cf6cfd710afb68f666d098d436678..9e044ed472dd743ae93d459bd9f2b713aae8931b 100644
--- a/polkadot/bridges/primitives/test-utils/src/lib.rs
+++ b/polkadot/bridges/primitives/test-utils/src/lib.rs
@@ -21,8 +21,7 @@
 use bp_header_chain::justification::GrandpaJustification;
 use codec::Encode;
 use sp_application_crypto::TryFrom;
-use sp_finality_grandpa::{AuthorityId, AuthorityWeight};
-use sp_finality_grandpa::{AuthoritySignature, SetId};
+use sp_finality_grandpa::{AuthorityId, AuthoritySignature, AuthorityWeight, SetId};
 use sp_runtime::traits::{Header as HeaderT, One, Zero};
 use sp_std::prelude::*;
 
@@ -49,7 +48,7 @@ pub struct JustificationGeneratorParams<H> {
 	pub authorities: Vec<(Account, AuthorityWeight)>,
 	/// The total number of precommit ancestors in the `votes_ancestries` field our justification.
 	///
-	/// These may be distributed among many different forks.
+	/// These may be distributed among many forks.
 	pub ancestors: u32,
 	/// The number of forks.
 	///
@@ -72,10 +71,7 @@ impl<H: HeaderT> Default for JustificationGeneratorParams<H> {
 
 /// Make a valid GRANDPA justification with sensible defaults
 pub fn make_default_justification<H: HeaderT>(header: &H) -> GrandpaJustification<H> {
-	let params = JustificationGeneratorParams::<H> {
-		header: header.clone(),
-		..Default::default()
-	};
+	let params = JustificationGeneratorParams::<H> { header: header.clone(), ..Default::default() };
 
 	make_justification_for_header(params)
 }
@@ -89,15 +85,11 @@ pub fn make_default_justification<H: HeaderT>(header: &H) -> GrandpaJustificatio
 ///
 /// Note: This needs at least three authorities or else the verifier will complain about
 /// being given an invalid commit.
-pub fn make_justification_for_header<H: HeaderT>(params: JustificationGeneratorParams<H>) -> GrandpaJustification<H> {
-	let JustificationGeneratorParams {
-		header,
-		round,
-		set_id,
-		authorities,
-		mut ancestors,
-		forks,
-	} = params;
+pub fn make_justification_for_header<H: HeaderT>(
+	params: JustificationGeneratorParams<H>,
+) -> GrandpaJustification<H> {
+	let JustificationGeneratorParams { header, round, set_id, authorities, mut ancestors, forks } =
+		params;
 	let (target_hash, target_number) = (header.hash(), *header.number());
 	let mut votes_ancestries = vec![];
 	let mut precommits = vec![];
@@ -144,11 +136,7 @@ pub fn make_justification_for_header<H: HeaderT>(params: JustificationGeneratorP
 
 	GrandpaJustification {
 		round,
-		commit: finality_grandpa::Commit {
-			target_hash,
-			target_number,
-			precommits,
-		},
+		commit: finality_grandpa::Commit { target_hash, target_number, precommits },
 		votes_ancestries,
 	}
 }
@@ -165,10 +153,7 @@ fn generate_chain<H: HeaderT>(fork_id: u32, depth: u32, ancestor: &H) -> Vec<H>
 
 		// Modifying the digest so headers at the same height but in different forks have different
 		// hashes
-		header
-			.digest_mut()
-			.logs
-			.push(sp_runtime::DigestItem::Other(fork_id.encode()));
+		header.digest_mut().logs.push(sp_runtime::DigestItem::Other(fork_id.encode()));
 
 		headers.push(header);
 	}
@@ -183,29 +168,26 @@ pub fn signed_precommit<H: HeaderT>(
 	round: u64,
 	set_id: SetId,
 ) -> finality_grandpa::SignedPrecommit<H::Hash, H::Number, AuthoritySignature, AuthorityId> {
-	let precommit = finality_grandpa::Precommit {
-		target_hash: target.0,
-		target_number: target.1,
-	};
+	let precommit = finality_grandpa::Precommit { target_hash: target.0, target_number: target.1 };
 
-	let encoded =
-		sp_finality_grandpa::localized_payload(round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()));
+	let encoded = sp_finality_grandpa::localized_payload(
+		round,
+		set_id,
+		&finality_grandpa::Message::Precommit(precommit.clone()),
+	);
 
 	let signature = signer.sign(&encoded);
 	let raw_signature: Vec<u8> = signature.to_bytes().into();
 
-	// Need to wrap our signature and id types that they match what our `SignedPrecommit` is expecting
+	// Need to wrap our signature and id types that they match what our `SignedPrecommit` is
+	// expecting
 	let signature = AuthoritySignature::try_from(raw_signature).expect(
 		"We know our Keypair is good,
 		so our signature must also be good.",
 	);
 	let id = (*signer).into();
 
-	finality_grandpa::SignedPrecommit {
-		precommit,
-		signature,
-		id,
-	}
+	finality_grandpa::SignedPrecommit { precommit, signature, id }
 }
 
 /// Get a header for testing.
@@ -213,13 +195,7 @@ pub fn signed_precommit<H: HeaderT>(
 /// The correct parent hash will be used if given a non-zero header.
 pub fn test_header<H: HeaderT>(number: H::Number) -> H {
 	let default = |num| {
-		H::new(
-			num,
-			Default::default(),
-			Default::default(),
-			Default::default(),
-			Default::default(),
-		)
+		H::new(num, Default::default(), Default::default(), Default::default(), Default::default())
 	};
 
 	let mut header = default(number);
diff --git a/polkadot/bridges/primitives/token-swap/Cargo.toml b/polkadot/bridges/primitives/token-swap/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..4b16c3567ea6eec862733abf4d44a0a40919795a
--- /dev/null
+++ b/polkadot/bridges/primitives/token-swap/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "bp-token-swap"
+description = "Primitives of the pallet-bridge-token-swap pallet"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[dependencies]
+codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
+
+# Substrate Dependencies
+
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false }
+
+[features]
+default = ["std"]
+std = [
+	"codec/std",
+	"frame-support/std",
+	"scale-info/std",
+	"sp-core/std",
+	"sp-std/std",
+]
diff --git a/polkadot/bridges/primitives/token-swap/src/lib.rs b/polkadot/bridges/primitives/token-swap/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..d46389e86891d68d9f14b68eda5574bf96c7b027
--- /dev/null
+++ b/polkadot/bridges/primitives/token-swap/src/lib.rs
@@ -0,0 +1,109 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+use codec::{Decode, Encode};
+use frame_support::{weights::Weight, RuntimeDebug};
+use scale_info::TypeInfo;
+use sp_core::U256;
+use sp_std::vec::Vec;
+
+/// Pending token swap state.
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
+pub enum TokenSwapState {
+	/// The swap has been started using the `start_claim` call, but we have no proof that it has
+	/// happened at the Bridged chain.
+	Started,
+	/// The swap has happened at the Bridged chain and may be claimed by the Bridged chain party
+	/// using the `claim_swap` call.
+	Confirmed,
+	/// The swap has failed at the Bridged chain and This chain party may cancel it using the
+	/// `cancel_swap` call.
+	Failed,
+}
+
+/// Token swap type.
+///
+/// Different swap types give a different guarantees regarding possible swap
+/// replay protection.
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
+pub enum TokenSwapType<ThisBlockNumber> {
+	/// The `target_account_at_bridged_chain` is temporary and only have funds for single swap.
+	///
+	/// ***WARNING**: if `target_account_at_bridged_chain` still exists after the swap has been
+	/// completed (either by claiming or canceling), the `source_account_at_this_chain` will be
+	/// able to restart the swap again and repeat the swap until `target_account_at_bridged_chain`
+	/// depletes.
+	TemporaryTargetAccountAtBridgedChain,
+	/// This swap type prevents `source_account_at_this_chain` from restarting the swap after it
+	/// has been completed. There are two consequences:
+	///
+	/// 1) the `source_account_at_this_chain` won't be able to call `start_swap` after given
+	/// <ThisBlockNumber>; 2) the `target_account_at_bridged_chain` won't be able to call
+	/// `claim_swap` (over the bridge) before    block `<ThisBlockNumber + 1>`.
+	///
+	/// The second element is the nonce of the swap. You must care about its uniqueness if you're
+	/// planning to perform another swap with exactly the same parameters (i.e. same amount, same
+	/// accounts, same `ThisBlockNumber`) to avoid collisions.
+	LockClaimUntilBlock(ThisBlockNumber, U256),
+}
+
+/// An intention to swap `source_balance_at_this_chain` owned by `source_account_at_this_chain`
+/// to `target_balance_at_bridged_chain` owned by `target_account_at_bridged_chain`.
+///
+/// **IMPORTANT NOTE**: this structure is always the same during single token swap. So even
+/// when chain changes, the meaning of This and Bridged are still used to point to the same chains.
+/// This chain is always the chain where swap has been started. And the Bridged chain is the other
+/// chain.
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
+pub struct TokenSwap<ThisBlockNumber, ThisBalance, ThisAccountId, BridgedBalance, BridgedAccountId>
+{
+	/// The type of the swap.
+	pub swap_type: TokenSwapType<ThisBlockNumber>,
+	/// This chain balance to be swapped with `target_balance_at_bridged_chain`.
+	pub source_balance_at_this_chain: ThisBalance,
+	/// Account id of the party acting at This chain and owning the `source_account_at_this_chain`.
+	pub source_account_at_this_chain: ThisAccountId,
+	/// Bridged chain balance to be swapped with `source_balance_at_this_chain`.
+	pub target_balance_at_bridged_chain: BridgedBalance,
+	/// Account id of the party acting at the Bridged chain and owning the
+	/// `target_balance_at_bridged_chain`.
+	pub target_account_at_bridged_chain: BridgedAccountId,
+}
+
+/// SCALE-encoded `Currency::transfer` call on the bridged chain.
+pub type RawBridgedTransferCall = Vec<u8>;
+
+/// Token swap creation parameters.
+#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)]
+pub struct TokenSwapCreation<BridgedAccountPublic, ThisChainBalance, BridgedAccountSignature> {
+	/// Public key of the `target_account_at_bridged_chain` account used to verify
+	/// `bridged_currency_transfer_signature`.
+	pub target_public_at_bridged_chain: BridgedAccountPublic,
+	/// Fee that the `source_account_at_this_chain` is ready to pay for the tokens
+	/// transfer message delivery and dispatch.
+	pub swap_delivery_and_dispatch_fee: ThisChainBalance,
+	/// Specification version of the Bridged chain.
+	pub bridged_chain_spec_version: u32,
+	/// SCALE-encoded tokens transfer call at the Bridged chain.
+	pub bridged_currency_transfer: RawBridgedTransferCall,
+	/// Dispatch weight of the tokens transfer call at the Bridged chain.
+	pub bridged_currency_transfer_weight: Weight,
+	/// The signature of the `target_account_at_bridged_chain` for the message
+	/// returned by the `pallet_bridge_dispatch::account_ownership_digest()` function call.
+	pub bridged_currency_transfer_signature: BridgedAccountSignature,
+}
diff --git a/polkadot/bridges/relays/bin-ethereum/Cargo.toml b/polkadot/bridges/relays/bin-ethereum/Cargo.toml
index efd9c0194b28cae3164238fa1a82cb4000f9b14d..610dee2c3ce99a027b8b5fa6be3910302f8101e3 100644
--- a/polkadot/bridges/relays/bin-ethereum/Cargo.toml
+++ b/polkadot/bridges/relays/bin-ethereum/Cargo.toml
@@ -6,24 +6,22 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-ansi_term = "0.12"
+anyhow = "1.0"
 async-std = "1.9.0"
 async-trait = "0.1.42"
 clap = { version = "2.33.3", features = ["yaml"] }
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-env_logger = "0.8.3"
-ethabi = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" }
-ethabi-contract = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" }
-ethabi-derive = { git = "https://github.com/paritytech/ethabi", branch = "td-eth-types-11" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
+ethabi = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" }
+ethabi-contract = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" }
+ethabi-derive = { git = "https://github.com/svyatonik/ethabi.git", branch = "bump-deps" }
 futures = "0.3.12"
 hex = "0.4"
 hex-literal = "0.3"
-libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] }
+libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"] }
 log = "0.4.14"
 num-traits = "0.2"
-serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0.64"
-time = "0.2"
+thiserror = "1.0.26"
 
 # Bridge dependencies
 
@@ -31,7 +29,6 @@ bp-currency-exchange = { path = "../../primitives/currency-exchange" }
 bp-eth-poa = { path = "../../primitives/ethereum-poa" }
 exchange-relay = { path = "../exchange" }
 headers-relay = { path = "../headers" }
-messages-relay = { path = "../messages" }
 relay-ethereum-client = { path = "../client-ethereum" }
 relay-rialto-client = { path = "../client-rialto" }
 relay-substrate-client = { path = "../client-substrate" }
@@ -40,9 +37,6 @@ rialto-runtime = { path = "../../bin/rialto/runtime" }
 
 # Substrate Dependencies
 
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
-substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/error.rs b/polkadot/bridges/relays/bin-ethereum/src/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..61ae2a9a498dc75d306aebce5af3e8ba0147e96a
--- /dev/null
+++ b/polkadot/bridges/relays/bin-ethereum/src/error.rs
@@ -0,0 +1,38 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use crate::rpc_errors::RpcError;
+use thiserror::Error;
+
+/// Result type used by PoA relay.
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Ethereum PoA relay errors.
+#[derive(Error, Debug)]
+pub enum Error {
+	/// Failed to decode initial header.
+	#[error("Error decoding initial header: {0}")]
+	DecodeInitialHeader(codec::Error),
+	/// RPC error.
+	#[error("{0}")]
+	Rpc(#[from] RpcError),
+	/// Failed to read genesis header.
+	#[error("Error reading Substrate genesis header: {0:?}")]
+	ReadGenesisHeader(relay_substrate_client::Error),
+	/// Failed to read initial GRANDPA authorities.
+	#[error("Error reading GRANDPA authorities set: {0:?}")]
+	ReadAuthorities(relay_substrate_client::Error),
+	/// Failed to deploy bridge contract to Ethereum chain.
+	#[error("Error deploying contract: {0:?}")]
+	DeployContract(RpcError),
+}
diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs
index 6fe3a1ce17b5ec3b5a1d31d43be792d2e6c16c3e..eea6b6e454809ce2b32ffd295b6265fbaafadafb 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_client.rs
@@ -14,8 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::rpc_errors::RpcError;
-use crate::substrate_sync_loop::QueuedRialtoHeader;
+use crate::{rpc_errors::RpcError, substrate_sync_loop::QueuedRialtoHeader};
 
 use async_trait::async_trait;
 use bp_eth_poa::signatures::secret_to_address;
@@ -41,7 +40,7 @@ type RpcResult<T> = std::result::Result<T, RpcError>;
 /// interactions involving, for example, an Ethereum contract.
 #[async_trait]
 pub trait EthereumHighLevelRpc {
-	/// Returns best Substrate block that PoA chain knows of.
+	/// Returns the best Substrate block that PoA chain knows of.
 	async fn best_substrate_block(&self, contract_address: Address) -> RpcResult<RialtoHeaderId>;
 
 	/// Returns true if Substrate header is known to Ethereum node.
@@ -60,7 +59,10 @@ pub trait EthereumHighLevelRpc {
 	) -> SubmittedHeaders<RialtoHeaderId, RpcError>;
 
 	/// Returns ids of incomplete Substrate headers.
-	async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult<HashSet<RialtoHeaderId>>;
+	async fn incomplete_substrate_headers(
+		&self,
+		contract_address: Address,
+	) -> RpcResult<HashSet<RialtoHeaderId>>;
 
 	/// Complete Substrate header.
 	async fn complete_substrate_header(
@@ -104,7 +106,7 @@ impl EthereumHighLevelRpc for EthereumClient {
 		let hash = rialto_runtime::Hash::decode(&mut &raw_hash[..])?;
 
 		if number != number.low_u32().into() {
-			return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber));
+			return Err(RpcError::Ethereum(EthereumNodeError::InvalidSubstrateBlockNumber))
 		}
 
 		Ok(HeaderId(number.low_u32(), hash))
@@ -138,31 +140,28 @@ impl EthereumHighLevelRpc for EthereumClient {
 		let address: Address = secret_to_address(&params.signer);
 		let nonce = match self.account_nonce(address).await {
 			Ok(nonce) => nonce,
-			Err(error) => {
+			Err(error) =>
 				return SubmittedHeaders {
 					submitted: Vec::new(),
 					incomplete: Vec::new(),
 					rejected: headers.iter().rev().map(|header| header.id()).collect(),
 					fatal_error: Some(error.into()),
-				}
-			}
+				},
 		};
 
 		// submit headers. Note that we're cloning self here. It is ok, because
 		// cloning `jsonrpsee::Client` only clones reference to background threads
 		submit_substrate_headers(
-			EthereumHeadersSubmitter {
-				client: self.clone(),
-				params,
-				contract_address,
-				nonce,
-			},
+			EthereumHeadersSubmitter { client: self.clone(), params, contract_address, nonce },
 			headers,
 		)
 		.await
 	}
 
-	async fn incomplete_substrate_headers(&self, contract_address: Address) -> RpcResult<HashSet<RialtoHeaderId>> {
+	async fn incomplete_substrate_headers(
+		&self,
+		contract_address: Address,
+	) -> RpcResult<HashSet<RialtoHeaderId>> {
 		let (encoded_call, call_decoder) = bridge_contract::functions::incomplete_headers::call();
 		let call_request = CallRequest {
 			to: Some(contract_address),
@@ -173,13 +172,14 @@ impl EthereumHighLevelRpc for EthereumClient {
 		let call_result = self.eth_call(call_request).await?;
 
 		// Q: Is is correct to call these "incomplete_ids"?
-		let (incomplete_headers_numbers, incomplete_headers_hashes) = call_decoder.decode(&call_result.0)?;
+		let (incomplete_headers_numbers, incomplete_headers_hashes) =
+			call_decoder.decode(&call_result.0)?;
 		let incomplete_ids = incomplete_headers_numbers
 			.into_iter()
 			.zip(incomplete_headers_hashes)
 			.filter_map(|(number, hash)| {
 				if number != number.low_u32().into() {
-					return None;
+					return None
 				}
 
 				Some(HeaderId(number.low_u32(), hash))
@@ -202,7 +202,11 @@ impl EthereumHighLevelRpc for EthereumClient {
 				Some(contract_address),
 				None,
 				false,
-				bridge_contract::functions::import_finality_proof::encode_input(id.0, id.1, justification),
+				bridge_contract::functions::import_finality_proof::encode_input(
+					id.0,
+					id.1,
+					justification,
+				),
 			)
 			.await?;
 
@@ -263,7 +267,7 @@ impl HeadersBatch {
 	) -> Result<(Self, Vec<RialtoHeaderId>), ()> {
 		if headers.len() != ids.len() {
 			log::error!(target: "bridge", "Collection size mismatch ({} vs {})", headers.len(), ids.len());
-			return Err(());
+			return Err(())
 		}
 
 		let header1 = headers.pop().ok_or(())?;
@@ -276,27 +280,14 @@ impl HeadersBatch {
 			submitting_ids.extend(ids.pop().iter());
 		}
 
-		Ok((
-			Self {
-				header1,
-				header2,
-				header3,
-				header4,
-			},
-			submitting_ids,
-		))
+		Ok((Self { header1, header2, header3, header4 }, submitting_ids))
 	}
 
 	/// Returns unified array of headers.
 	///
 	/// The first element is always `Some`.
 	fn headers(&self) -> [Option<&QueuedRialtoHeader>; HEADERS_BATCH] {
-		[
-			Some(&self.header1),
-			self.header2.as_ref(),
-			self.header3.as_ref(),
-			self.header4.as_ref(),
-		]
+		[Some(&self.header1), self.header2.as_ref(), self.header3.as_ref(), self.header4.as_ref()]
 	}
 
 	/// Encodes all headers. If header is not present an empty vector will be returned.
@@ -323,9 +314,10 @@ impl HeadersBatch {
 	/// or when `idx > HEADERS_BATCH`.
 	pub fn split_off(&mut self, idx: usize) -> Result<(), ()> {
 		if idx == 0 || idx > HEADERS_BATCH {
-			return Err(());
+			return Err(())
 		}
-		let mut vals: [_; HEADERS_BATCH] = [&mut None, &mut self.header2, &mut self.header3, &mut self.header4];
+		let mut vals: [_; HEADERS_BATCH] =
+			[&mut None, &mut self.header2, &mut self.header3, &mut self.header4];
 		for val in vals.iter_mut().skip(idx) {
 			**val = None;
 		}
@@ -359,7 +351,8 @@ struct EthereumHeadersSubmitter {
 impl HeadersSubmitter for EthereumHeadersSubmitter {
 	async fn is_headers_incomplete(&self, headers: &HeadersBatch) -> RpcResult<usize> {
 		let [h1, h2, h3, h4] = headers.encode();
-		let (encoded_call, call_decoder) = bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4);
+		let (encoded_call, call_decoder) =
+			bridge_contract::functions::is_incomplete_headers::call(h1, h2, h3, h4);
 		let call_request = CallRequest {
 			to: Some(self.contract_address),
 			data: Some(encoded_call.into()),
@@ -369,7 +362,7 @@ impl HeadersSubmitter for EthereumHeadersSubmitter {
 		let call_result = self.client.eth_call(call_request).await?;
 		let incomplete_index: U256 = call_decoder.decode(&call_result.0)?;
 		if incomplete_index > HEADERS_BATCH.into() {
-			return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex));
+			return Err(RpcError::Ethereum(EthereumNodeError::InvalidIncompleteIndex))
 		}
 
 		Ok(incomplete_index.low_u32() as _)
@@ -407,17 +400,21 @@ async fn submit_substrate_headers(
 	headers.reverse();
 
 	while !headers.is_empty() {
-		let (headers, submitting_ids) =
-			HeadersBatch::pop_from(&mut headers, &mut ids).expect("Headers and ids are not empty; qed");
+		let (headers, submitting_ids) = HeadersBatch::pop_from(&mut headers, &mut ids)
+			.expect("Headers and ids are not empty; qed");
 
-		submitted_headers.fatal_error =
-			submit_substrate_headers_batch(&mut header_submitter, &mut submitted_headers, submitting_ids, headers)
-				.await;
+		submitted_headers.fatal_error = submit_substrate_headers_batch(
+			&mut header_submitter,
+			&mut submitted_headers,
+			submitting_ids,
+			headers,
+		)
+		.await;
 
 		if submitted_headers.fatal_error.is_some() {
 			ids.reverse();
 			submitted_headers.rejected.extend(ids);
-			break;
+			break
 		}
 	}
 
@@ -436,9 +433,11 @@ async fn submit_substrate_headers_batch(
 	// if parent of first header is either incomplete, or rejected, we assume that contract
 	// will reject this header as well
 	let parent_id = headers.header1.parent_id();
-	if submitted_headers.rejected.contains(&parent_id) || submitted_headers.incomplete.contains(&parent_id) {
+	if submitted_headers.rejected.contains(&parent_id) ||
+		submitted_headers.incomplete.contains(&parent_id)
+	{
 		submitted_headers.rejected.extend(ids);
-		return None;
+		return None
 	}
 
 	// check if headers are incomplete
@@ -450,11 +449,11 @@ async fn submit_substrate_headers_batch(
 			// contract has rejected all headers => we do not want to submit it
 			submitted_headers.rejected.extend(ids);
 			if error.is_connection_error() {
-				return Some(error);
+				return Some(error)
 			} else {
-				return None;
+				return None
 			}
-		}
+		},
 	};
 
 	// Modify `ids` and `headers` to only contain values that are going to be accepted.
@@ -477,12 +476,12 @@ async fn submit_substrate_headers_batch(
 			submitted_headers.submitted.extend(submitted);
 			submitted_headers.rejected.extend(rejected);
 			None
-		}
+		},
 		Err(error) => {
 			submitted_headers.rejected.extend(submitted);
 			submitted_headers.rejected.extend(rejected);
 			Some(error)
-		}
+		},
 	}
 }
 
@@ -521,11 +520,7 @@ mod tests {
 				number,
 				Default::default(),
 				Default::default(),
-				if number == 0 {
-					Default::default()
-				} else {
-					header(number - 1).id().1
-				},
+				if number == 0 { Default::default() } else { header(number - 1).id().1 },
 				Default::default(),
 			)
 			.into(),
@@ -535,10 +530,7 @@ mod tests {
 	#[test]
 	fn descendants_of_incomplete_headers_are_not_submitted() {
 		let submitted_headers = async_std::task::block_on(submit_substrate_headers(
-			TestHeadersSubmitter {
-				incomplete: vec![header(5).id()],
-				failed: vec![],
-			},
+			TestHeadersSubmitter { incomplete: vec![header(5).id()], failed: vec![] },
 			vec![header(5), header(6)],
 		));
 		assert_eq!(submitted_headers.submitted, vec![header(5).id()]);
@@ -550,19 +542,8 @@ mod tests {
 	#[test]
 	fn headers_after_fatal_error_are_not_submitted() {
 		let submitted_headers = async_std::task::block_on(submit_substrate_headers(
-			TestHeadersSubmitter {
-				incomplete: vec![],
-				failed: vec![header(9).id()],
-			},
-			vec![
-				header(5),
-				header(6),
-				header(7),
-				header(8),
-				header(9),
-				header(10),
-				header(11),
-			],
+			TestHeadersSubmitter { incomplete: vec![], failed: vec![header(9).id()] },
+			vec![header(5), header(6), header(7), header(8), header(9), header(10), header(11)],
 		));
 		assert_eq!(
 			submitted_headers.submitted,
@@ -583,10 +564,7 @@ mod tests {
 		let (headers, ids) = HeadersBatch::pop_from(&mut init_headers, &mut init_ids).unwrap();
 		assert_eq!(init_headers, vec![header(5)]);
 		assert_eq!(init_ids, vec![header(5).id()]);
-		assert_eq!(
-			ids,
-			vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()]
-		);
+		assert_eq!(ids, vec![header(1).id(), header(2).id(), header(3).id(), header(4).id()]);
 		headers
 	}
 
diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
index 3f9076f6db2298e0bcbbd84be8e2a783cfc64b32..76a75b062ecbc2fe828c900bbcb29435f859f517 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_deploy_contract.rs
@@ -14,17 +14,22 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::ethereum_client::{bridge_contract, EthereumHighLevelRpc};
-use crate::rpc_errors::RpcError;
+use crate::{
+	error::{Error, Result},
+	ethereum_client::{bridge_contract, EthereumHighLevelRpc},
+	rpc_errors::RpcError,
+};
 
 use codec::{Decode, Encode};
 use num_traits::Zero;
 use relay_ethereum_client::{
-	Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams,
+	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
+	SigningParams as EthereumSigningParams,
 };
 use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto};
 use relay_substrate_client::{
-	Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams, OpaqueGrandpaAuthoritiesSet,
+	Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
+	OpaqueGrandpaAuthoritiesSet,
 };
 use relay_utils::HeaderId;
 
@@ -100,21 +105,21 @@ pub async fn run(params: EthereumDeployContractParams) {
 async fn prepare_initial_header(
 	sub_client: &SubstrateClient<Rialto>,
 	sub_initial_header: Option<Vec<u8>>,
-) -> Result<(RialtoHeaderId, Vec<u8>), String> {
+) -> Result<(RialtoHeaderId, Vec<u8>)> {
 	match sub_initial_header {
-		Some(raw_initial_header) => match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) {
-			Ok(initial_header) => Ok((
-				HeaderId(initial_header.number, initial_header.hash()),
-				raw_initial_header,
-			)),
-			Err(error) => Err(format!("Error decoding initial header: {}", error)),
+		Some(raw_initial_header) => {
+			match rialto_runtime::Header::decode(&mut &raw_initial_header[..]) {
+				Ok(initial_header) =>
+					Ok((HeaderId(initial_header.number, initial_header.hash()), raw_initial_header)),
+				Err(error) => Err(Error::DecodeInitialHeader(error)),
+			}
 		},
 		None => {
 			let initial_header = sub_client.header_by_number(Zero::zero()).await;
 			initial_header
 				.map(|header| (HeaderId(Zero::zero(), header.hash()), header.encode()))
-				.map_err(|error| format!("Error reading Substrate genesis header: {:?}", error))
-		}
+				.map_err(Error::ReadGenesisHeader)
+		},
 	}
 }
 
@@ -123,13 +128,13 @@ async fn prepare_initial_authorities_set(
 	sub_client: &SubstrateClient<Rialto>,
 	sub_initial_header_hash: rialto_runtime::Hash,
 	sub_initial_authorities_set: Option<Vec<u8>>,
-) -> Result<OpaqueGrandpaAuthoritiesSet, String> {
+) -> Result<OpaqueGrandpaAuthoritiesSet> {
 	let initial_authorities_set = match sub_initial_authorities_set {
 		Some(initial_authorities_set) => Ok(initial_authorities_set),
 		None => sub_client.grandpa_authorities_set(sub_initial_header_hash).await,
 	};
 
-	initial_authorities_set.map_err(|error| format!("Error reading GRANDPA authorities set: {:?}", error))
+	initial_authorities_set.map_err(Error::ReadAuthorities)
 }
 
 /// Deploy bridge contract to Ethereum chain.
@@ -140,15 +145,20 @@ async fn deploy_bridge_contract(
 	initial_header: Vec<u8>,
 	initial_set_id: u64,
 	initial_authorities: Vec<u8>,
-) -> Result<(), String> {
+) -> Result<()> {
 	eth_client
 		.submit_ethereum_transaction(
 			params,
 			None,
 			None,
 			false,
-			bridge_contract::constructor(contract_code, initial_header, initial_set_id, initial_authorities),
+			bridge_contract::constructor(
+				contract_code,
+				initial_header,
+				initial_set_id,
+				initial_authorities,
+			),
 		)
 		.await
-		.map_err(|error| format!("Error deploying contract: {:?}", error))
+		.map_err(Error::DeployContract)
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs
index 3111aa2de436375fa69b1ec60420602cca755c29..90d9a23835d46fa2d934cb73e24da37f51a72626 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange.rs
@@ -16,28 +16,34 @@
 
 //! Relaying proofs of PoA -> Substrate exchange transactions.
 
-use crate::instances::BridgeInstance;
-use crate::rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc};
-use crate::rpc_errors::RpcError;
-use crate::substrate_types::into_substrate_ethereum_receipt;
+use crate::{
+	instances::BridgeInstance,
+	rialto_client::{SubmitEthereumExchangeTransactionProof, SubstrateHighLevelRpc},
+	rpc_errors::RpcError,
+	substrate_types::into_substrate_ethereum_receipt,
+};
 
 use async_trait::async_trait;
 use bp_currency_exchange::MaybeLockFundsTransaction;
-use exchange_relay::exchange::{
-	relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient,
-	TransactionProofPipeline,
+use exchange_relay::{
+	exchange::{
+		relay_single_transaction_proof, SourceBlock, SourceClient, SourceTransaction, TargetClient,
+		TransactionProofPipeline,
+	},
+	exchange_loop::{run as run_loop, InMemoryStorage},
 };
-use exchange_relay::exchange_loop::{run as run_loop, InMemoryStorage};
 use relay_ethereum_client::{
 	types::{
 		HeaderId as EthereumHeaderId, HeaderWithTransactions as EthereumHeaderWithTransactions,
-		Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256, HEADER_ID_PROOF,
+		Transaction as EthereumTransaction, TransactionHash as EthereumTransactionHash, H256,
+		HEADER_ID_PROOF,
 	},
 	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
 };
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
 use relay_substrate_client::{
-	Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
+	Chain as SubstrateChain, Client as SubstrateClient,
+	ConnectionParams as SubstrateConnectionParams,
 };
 use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId};
 use rialto_runtime::exchange::EthereumTransactionInclusionProof;
@@ -111,12 +117,7 @@ impl SourceBlock for EthereumSourceBlock {
 	}
 
 	fn transactions(&self) -> Vec<Self::Transaction> {
-		self.0
-			.transactions
-			.iter()
-			.cloned()
-			.map(EthereumSourceTransaction)
-			.collect()
+		self.0.transactions.iter().cloned().map(EthereumSourceTransaction).collect()
 	}
 }
 
@@ -178,13 +179,12 @@ impl SourceClient<EthereumToSubstrateExchange> for EthereumTransactionsSource {
 		};
 
 		// we need transaction to be mined => check if it is included in the block
-		let (eth_header_id, eth_tx_index) = match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) {
-			(Some(block_number), Some(block_hash), Some(transaction_index)) => (
-				HeaderId(block_number.as_u64(), block_hash),
-				transaction_index.as_u64() as _,
-			),
-			_ => return Ok(None),
-		};
+		let (eth_header_id, eth_tx_index) =
+			match (eth_tx.block_number, eth_tx.block_hash, eth_tx.transaction_index) {
+				(Some(block_number), Some(block_hash), Some(transaction_index)) =>
+					(HeaderId(block_number.as_u64(), block_hash), transaction_index.as_u64() as _),
+				_ => return Ok(None),
+			};
 
 		Ok(Some((eth_header_id, eth_tx_index)))
 	}
@@ -194,9 +194,11 @@ impl SourceClient<EthereumToSubstrateExchange> for EthereumTransactionsSource {
 		block: &EthereumSourceBlock,
 		tx_index: usize,
 	) -> Result<EthereumTransactionInclusionProof, RpcError> {
-		const TRANSACTION_HAS_RAW_FIELD_PROOF: &str = "RPC level checks that transactions from Ethereum\
+		const TRANSACTION_HAS_RAW_FIELD_PROOF: &str =
+			"RPC level checks that transactions from Ethereum\
 			node are having `raw` field; qed";
-		const BLOCK_HAS_HASH_FIELD_PROOF: &str = "RPC level checks that block has `hash` field; qed";
+		const BLOCK_HAS_HASH_FIELD_PROOF: &str =
+			"RPC level checks that block has `hash` field; qed";
 
 		let mut transaction_proof = Vec::with_capacity(block.0.transactions.len());
 		for tx in &block.0.transactions {
@@ -266,12 +268,15 @@ impl TargetClient<EthereumToSubstrateExchange> for SubstrateTransactionsTarget {
 		self.client.best_ethereum_finalized_block().await
 	}
 
-	async fn filter_transaction_proof(&self, proof: &EthereumTransactionInclusionProof) -> Result<bool, RpcError> {
+	async fn filter_transaction_proof(
+		&self,
+		proof: &EthereumTransactionInclusionProof,
+	) -> Result<bool, RpcError> {
 		// let's try to parse transaction locally
 		let (raw_tx, raw_tx_receipt) = &proof.proof[proof.index as usize];
 		let parse_result = rialto_runtime::exchange::EthTransaction::parse(raw_tx);
 		if parse_result.is_err() {
-			return Ok(false);
+			return Ok(false)
 		}
 
 		// now let's check if transaction is successful
@@ -285,8 +290,12 @@ impl TargetClient<EthereumToSubstrateExchange> for SubstrateTransactionsTarget {
 		self.client.verify_exchange_transaction_proof(proof.clone()).await
 	}
 
-	async fn submit_transaction_proof(&self, proof: EthereumTransactionInclusionProof) -> Result<(), RpcError> {
-		let (sign_params, bridge_instance) = (self.sign_params.clone(), self.bridge_instance.clone());
+	async fn submit_transaction_proof(
+		&self,
+		proof: EthereumTransactionInclusionProof,
+	) -> Result<(), RpcError> {
+		let (sign_params, bridge_instance) =
+			(self.sign_params.clone(), self.bridge_instance.clone());
 		self.client
 			.submit_exchange_transaction_proof(sign_params, bridge_instance, proof)
 			.await
@@ -311,9 +320,10 @@ pub async fn run(params: EthereumExchangeParams) {
 					err,
 				),
 			}
-		}
+		},
 		ExchangeRelayMode::Auto(eth_start_with_block_number) => {
-			let result = run_auto_transactions_relay_loop(params, eth_start_with_block_number).await;
+			let result =
+				run_auto_transactions_relay_loop(params, eth_start_with_block_number).await;
 			if let Err(err) = result {
 				log::error!(
 					target: "bridge",
@@ -321,23 +331,18 @@ pub async fn run(params: EthereumExchangeParams) {
 					err,
 				);
 			}
-		}
+		},
 	}
 }
 
 /// Run single transaction proof relay and stop.
-async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_hash: H256) -> Result<(), String> {
-	let EthereumExchangeParams {
-		eth_params,
-		sub_params,
-		sub_sign,
-		instance,
-		..
-	} = params;
+async fn run_single_transaction_relay(
+	params: EthereumExchangeParams,
+	eth_tx_hash: H256,
+) -> anyhow::Result<()> {
+	let EthereumExchangeParams { eth_params, sub_params, sub_sign, instance, .. } = params;
 
-	let eth_client = EthereumClient::try_connect(eth_params)
-		.await
-		.map_err(RpcError::Ethereum)?;
+	let eth_client = EthereumClient::try_connect(eth_params).await.map_err(RpcError::Ethereum)?;
 	let sub_client = SubstrateClient::<Rialto>::try_connect(sub_params)
 		.await
 		.map_err(RpcError::Substrate)?;
@@ -349,20 +354,17 @@ async fn run_single_transaction_relay(params: EthereumExchangeParams, eth_tx_has
 		bridge_instance: instance,
 	};
 
-	relay_single_transaction_proof(&source, &target, eth_tx_hash).await
+	relay_single_transaction_proof(&source, &target, eth_tx_hash)
+		.await
+		.map_err(Into::into)
 }
 
 async fn run_auto_transactions_relay_loop(
 	params: EthereumExchangeParams,
 	eth_start_with_block_number: Option<u64>,
-) -> Result<(), String> {
+) -> anyhow::Result<()> {
 	let EthereumExchangeParams {
-		eth_params,
-		sub_params,
-		sub_sign,
-		metrics_params,
-		instance,
-		..
+		eth_params, sub_params, sub_sign, metrics_params, instance, ..
 	} = params;
 
 	let eth_client = EthereumClient::new(eth_params).await;
@@ -370,18 +372,17 @@ async fn run_auto_transactions_relay_loop(
 
 	let eth_start_with_block_number = match eth_start_with_block_number {
 		Some(eth_start_with_block_number) => eth_start_with_block_number,
-		None => {
+		None =>
 			sub_client
 				.best_ethereum_finalized_block()
 				.await
 				.map_err(|err| {
-					format!(
+					anyhow::format_err!(
 						"Error retrieving best finalized Ethereum block from Substrate node: {:?}",
 						err
 					)
 				})?
-				.0
-		}
+				.0,
 	};
 
 	run_loop(
diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
index 602d4f14e4f0bacc7ab360b2db288428c4ab758b..f68a21e594e068ea813e8cab6f2aa9352f96ce82 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_exchange_submit.rs
@@ -16,13 +16,15 @@
 
 //! Submitting Ethereum -> Substrate exchange transactions.
 
+use anyhow::anyhow;
 use bp_eth_poa::{
 	signatures::{secret_to_address, SignTransaction},
 	UnsignedTransaction,
 };
 use relay_ethereum_client::{
 	types::{CallRequest, U256},
-	Client as EthereumClient, ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams,
+	Client as EthereumClient, ConnectionParams as EthereumConnectionParams,
+	SigningParams as EthereumSigningParams,
 };
 use rialto_runtime::exchange::LOCK_FUNDS_ADDRESS;
 
@@ -43,18 +45,13 @@ pub struct EthereumExchangeSubmitParams {
 
 /// Submit single Ethereum -> Substrate exchange transaction.
 pub async fn run(params: EthereumExchangeSubmitParams) {
-	let EthereumExchangeSubmitParams {
-		eth_params,
-		eth_sign,
-		eth_nonce,
-		eth_amount,
-		sub_recipient,
-	} = params;
+	let EthereumExchangeSubmitParams { eth_params, eth_sign, eth_nonce, eth_amount, sub_recipient } =
+		params;
 
-	let result: Result<_, String> = async move {
+	let result: anyhow::Result<_> = async move {
 		let eth_client = EthereumClient::try_connect(eth_params)
 			.await
-			.map_err(|err| format!("error connecting to Ethereum node: {:?}", err))?;
+			.map_err(|err| anyhow!("error connecting to Ethereum node: {:?}", err))?;
 
 		let eth_signer_address = secret_to_address(&eth_sign.signer);
 		let sub_recipient_encoded = sub_recipient;
@@ -63,7 +60,7 @@ pub async fn run(params: EthereumExchangeSubmitParams) {
 			None => eth_client
 				.account_nonce(eth_signer_address)
 				.await
-				.map_err(|err| format!("error fetching acount nonce: {:?}", err))?,
+				.map_err(|err| anyhow!("error fetching acount nonce: {:?}", err))?,
 		};
 		let gas = eth_client
 			.estimate_gas(CallRequest {
@@ -74,7 +71,7 @@ pub async fn run(params: EthereumExchangeSubmitParams) {
 				..Default::default()
 			})
 			.await
-			.map_err(|err| format!("error estimating gas requirements: {:?}", err))?;
+			.map_err(|err| anyhow!("error estimating gas requirements: {:?}", err))?;
 		let eth_tx_unsigned = UnsignedTransaction {
 			nonce,
 			gas_price: eth_sign.gas_price,
@@ -83,13 +80,12 @@ pub async fn run(params: EthereumExchangeSubmitParams) {
 			value: eth_amount,
 			payload: sub_recipient_encoded.to_vec(),
 		};
-		let eth_tx_signed = eth_tx_unsigned
-			.clone()
-			.sign_by(&eth_sign.signer, Some(eth_sign.chain_id));
+		let eth_tx_signed =
+			eth_tx_unsigned.clone().sign_by(&eth_sign.signer, Some(eth_sign.chain_id));
 		eth_client
 			.submit_transaction(eth_tx_signed)
 			.await
-			.map_err(|err| format!("error submitting transaction: {:?}", err))?;
+			.map_err(|err| anyhow!("error submitting transaction: {:?}", err))?;
 
 		Ok(eth_tx_unsigned)
 	}
@@ -102,13 +98,13 @@ pub async fn run(params: EthereumExchangeSubmitParams) {
 				"Exchange transaction has been submitted to Ethereum node: {:?}",
 				eth_tx_unsigned,
 			);
-		}
+		},
 		Err(err) => {
 			log::error!(
 				target: "bridge",
 				"Error submitting exchange transaction to Ethereum node: {}",
 				err,
 			);
-		}
+		},
 	}
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs b/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
index 111abcd86e7105145cad5a69385f2070e66154d5..ee5f8a4600ece2376ac2c2d97112ad16cc2ffd1c 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/ethereum_sync_loop.rs
@@ -16,11 +16,13 @@
 
 //! Ethereum PoA -> Rialto-Substrate synchronization.
 
-use crate::ethereum_client::EthereumHighLevelRpc;
-use crate::instances::BridgeInstance;
-use crate::rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc};
-use crate::rpc_errors::RpcError;
-use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts};
+use crate::{
+	ethereum_client::EthereumHighLevelRpc,
+	instances::BridgeInstance,
+	rialto_client::{SubmitEthereumHeaders, SubstrateHighLevelRpc},
+	rpc_errors::RpcError,
+	substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts},
+};
 
 use async_trait::async_trait;
 use codec::Encode;
@@ -35,12 +37,12 @@ use relay_ethereum_client::{
 };
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
 use relay_substrate_client::{
-	Chain as SubstrateChain, Client as SubstrateClient, ConnectionParams as SubstrateConnectionParams,
+	Chain as SubstrateChain, Client as SubstrateClient,
+	ConnectionParams as SubstrateConnectionParams,
 };
 use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient};
 
-use std::fmt::Debug;
-use std::{collections::HashSet, sync::Arc, time::Duration};
+use std::{collections::HashSet, fmt::Debug, sync::Arc, time::Duration};
 
 pub mod consts {
 	use super::*;
@@ -57,7 +59,8 @@ pub mod consts {
 	pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 128;
 	/// Max Ethereum headers count we want to have in 'submitted' state.
 	pub const MAX_SUBMITTED_HEADERS: usize = 128;
-	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned).
+	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten
+	/// (pruned).
 	pub const PRUNE_DEPTH: u32 = 4096;
 }
 
@@ -106,8 +109,8 @@ impl HeadersSyncPipeline for EthereumHeadersSyncPipeline {
 	type Completion = ();
 
 	fn estimate_size(source: &QueuedHeader<Self>) -> usize {
-		into_substrate_ethereum_header(source.header()).encode().len()
-			+ into_substrate_ethereum_receipts(source.extra())
+		into_substrate_ethereum_header(source.header()).encode().len() +
+			into_substrate_ethereum_receipts(source.extra())
 				.map(|extra| extra.encode().len())
 				.unwrap_or(0)
 	}
@@ -148,22 +151,17 @@ impl SourceClient<EthereumHeadersSyncPipeline> for EthereumHeadersSource {
 	}
 
 	async fn header_by_hash(&self, hash: HeaderHash) -> Result<Header, RpcError> {
-		self.client
-			.header_by_hash(hash)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into)
 	}
 
 	async fn header_by_number(&self, number: u64) -> Result<Header, RpcError> {
-		self.client
-			.header_by_number(number)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_number(number).await.map(Into::into).map_err(Into::into)
 	}
 
-	async fn header_completion(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, Option<()>), RpcError> {
+	async fn header_completion(
+		&self,
+		id: EthereumHeaderId,
+	) -> Result<(EthereumHeaderId, Option<()>), RpcError> {
 		Ok((id, None))
 	}
 
@@ -172,9 +170,7 @@ impl SourceClient<EthereumHeadersSyncPipeline> for EthereumHeadersSource {
 		id: EthereumHeaderId,
 		header: QueuedEthereumHeader,
 	) -> Result<(EthereumHeaderId, Vec<Receipt>), RpcError> {
-		self.client
-			.transaction_receipts(id, header.header().transactions.clone())
-			.await
+		self.client.transaction_receipts(id, header.header().transactions.clone()).await
 	}
 }
 
@@ -197,12 +193,7 @@ impl SubstrateHeadersTarget {
 		sign_params: RialtoSigningParams,
 		bridge_instance: Arc<dyn BridgeInstance>,
 	) -> Self {
-		Self {
-			client,
-			sign_transactions,
-			sign_params,
-			bridge_instance,
-		}
+		Self { client, sign_transactions, sign_params, bridge_instance }
 	}
 }
 
@@ -225,16 +216,19 @@ impl TargetClient<EthereumHeadersSyncPipeline> for SubstrateHeadersTarget {
 		self.client.best_ethereum_block().await
 	}
 
-	async fn is_known_header(&self, id: EthereumHeaderId) -> Result<(EthereumHeaderId, bool), RpcError> {
+	async fn is_known_header(
+		&self,
+		id: EthereumHeaderId,
+	) -> Result<(EthereumHeaderId, bool), RpcError> {
 		Ok((id, self.client.ethereum_header_known(id).await?))
 	}
 
-	async fn submit_headers(&self, headers: Vec<QueuedEthereumHeader>) -> SubmittedHeaders<EthereumHeaderId, RpcError> {
-		let (sign_params, bridge_instance, sign_transactions) = (
-			self.sign_params.clone(),
-			self.bridge_instance.clone(),
-			self.sign_transactions,
-		);
+	async fn submit_headers(
+		&self,
+		headers: Vec<QueuedEthereumHeader>,
+	) -> SubmittedHeaders<EthereumHeaderId, RpcError> {
+		let (sign_params, bridge_instance, sign_transactions) =
+			(self.sign_params.clone(), self.bridge_instance.clone(), self.sign_transactions);
 		self.client
 			.submit_ethereum_headers(sign_params, bridge_instance, headers, sign_transactions)
 			.await
@@ -245,11 +239,18 @@ impl TargetClient<EthereumHeadersSyncPipeline> for SubstrateHeadersTarget {
 	}
 
 	#[allow(clippy::unit_arg)]
-	async fn complete_header(&self, id: EthereumHeaderId, _completion: ()) -> Result<EthereumHeaderId, RpcError> {
+	async fn complete_header(
+		&self,
+		id: EthereumHeaderId,
+		_completion: (),
+	) -> Result<EthereumHeaderId, RpcError> {
 		Ok(id)
 	}
 
-	async fn requires_extra(&self, header: QueuedEthereumHeader) -> Result<(EthereumHeaderId, bool), RpcError> {
+	async fn requires_extra(
+		&self,
+		header: QueuedEthereumHeader,
+	) -> Result<(EthereumHeaderId, bool), RpcError> {
 		// we can minimize number of receipts_check calls by checking header
 		// logs bloom here, but it may give us false positives (when authorities
 		// source is contract, we never need any logs)
@@ -292,7 +293,7 @@ pub async fn run(params: EthereumSyncParams) -> Result<(), RpcError> {
 		futures::future::pending(),
 	)
 	.await
-	.map_err(RpcError::SyncLoop)?;
+	.map_err(|e| RpcError::SyncLoop(e.to_string()))?;
 
 	Ok(())
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/instances.rs b/polkadot/bridges/relays/bin-ethereum/src/instances.rs
index 2ade8632a92c03d0ccb208c612174ba99652a0aa..74feb1da320d4bb3e401366d85166874cd394d76 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/instances.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/instances.rs
@@ -18,16 +18,18 @@
 //! synchronizing a Substrate chain which can include multiple instances of the bridge pallet we
 //! must somehow decide which of the instances to sync.
 //!
-//! Note that each instance of the bridge pallet is coupled with an instance of the currency exchange
-//! pallet. We must also have a way to create `Call`s for the correct currency exchange instance.
+//! Note that each instance of the bridge pallet is coupled with an instance of the currency
+//! exchange pallet. We must also have a way to create `Call`s for the correct currency exchange
+//! instance.
 //!
 //! This module helps by preparing the correct `Call`s for each of the different pallet instances.
 
-use crate::ethereum_sync_loop::QueuedEthereumHeader;
-use crate::substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts};
+use crate::{
+	ethereum_sync_loop::QueuedEthereumHeader,
+	substrate_types::{into_substrate_ethereum_header, into_substrate_ethereum_receipts},
+};
 
-use rialto_runtime::exchange::EthereumTransactionInclusionProof as Proof;
-use rialto_runtime::Call;
+use rialto_runtime::{exchange::EthereumTransactionInclusionProof as Proof, Call};
 
 /// Interface for `Calls` which are needed to correctly sync the bridge.
 ///
@@ -48,8 +50,8 @@ pub struct RialtoPoA;
 
 impl BridgeInstance for RialtoPoA {
 	fn build_signed_header_call(&self, headers: Vec<QueuedEthereumHeader>) -> Call {
-		let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers(
-			headers
+		let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers {
+			headers_with_receipts: headers
 				.into_iter()
 				.map(|header| {
 					(
@@ -58,22 +60,23 @@ impl BridgeInstance for RialtoPoA {
 					)
 				})
 				.collect(),
-		);
+		};
 
 		rialto_runtime::Call::BridgeRialtoPoa(pallet_call)
 	}
 
 	fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call {
-		let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header(
-			into_substrate_ethereum_header(header.header()),
-			into_substrate_ethereum_receipts(header.extra()),
-		);
+		let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header {
+			header: Box::new(into_substrate_ethereum_header(header.header())),
+			receipts: into_substrate_ethereum_receipts(header.extra()),
+		};
 
 		rialto_runtime::Call::BridgeRialtoPoa(pallet_call)
 	}
 
 	fn build_currency_exchange_call(&self, proof: Proof) -> Call {
-		let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof);
+		let pallet_call =
+			rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction { proof };
 		rialto_runtime::Call::BridgeRialtoCurrencyExchange(pallet_call)
 	}
 }
@@ -84,8 +87,8 @@ pub struct Kovan;
 
 impl BridgeInstance for Kovan {
 	fn build_signed_header_call(&self, headers: Vec<QueuedEthereumHeader>) -> Call {
-		let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers(
-			headers
+		let pallet_call = rialto_runtime::BridgeEthPoACall::import_signed_headers {
+			headers_with_receipts: headers
 				.into_iter()
 				.map(|header| {
 					(
@@ -94,22 +97,23 @@ impl BridgeInstance for Kovan {
 					)
 				})
 				.collect(),
-		);
+		};
 
 		rialto_runtime::Call::BridgeKovan(pallet_call)
 	}
 
 	fn build_unsigned_header_call(&self, header: QueuedEthereumHeader) -> Call {
-		let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header(
-			into_substrate_ethereum_header(header.header()),
-			into_substrate_ethereum_receipts(header.extra()),
-		);
+		let pallet_call = rialto_runtime::BridgeEthPoACall::import_unsigned_header {
+			header: Box::new(into_substrate_ethereum_header(header.header())),
+			receipts: into_substrate_ethereum_receipts(header.extra()),
+		};
 
 		rialto_runtime::Call::BridgeKovan(pallet_call)
 	}
 
 	fn build_currency_exchange_call(&self, proof: Proof) -> Call {
-		let pallet_call = rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction(proof);
+		let pallet_call =
+			rialto_runtime::BridgeCurrencyExchangeCall::import_peer_transaction { proof };
 		rialto_runtime::Call::BridgeKovanCurrencyExchange(pallet_call)
 	}
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/main.rs b/polkadot/bridges/relays/bin-ethereum/src/main.rs
index bcdae353d3dc4d4aab3af720a59bbea6746704dc..99e1b48968d794fd1701730fab080137c0b6d9c1 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/main.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/main.rs
@@ -16,6 +16,7 @@
 
 #![recursion_limit = "1024"]
 
+mod error;
 mod ethereum_client;
 mod ethereum_deploy_contract;
 mod ethereum_exchange;
@@ -27,6 +28,7 @@ mod rpc_errors;
 mod substrate_sync_loop;
 mod substrate_types;
 
+use anyhow::anyhow;
 use ethereum_deploy_contract::EthereumDeployContractParams;
 use ethereum_exchange::EthereumExchangeParams;
 use ethereum_exchange_submit::EthereumExchangeSubmitParams;
@@ -34,16 +36,18 @@ use ethereum_sync_loop::EthereumSyncParams;
 use headers_relay::sync::TargetTransactionMode;
 use hex_literal::hex;
 use instances::{BridgeInstance, Kovan, RialtoPoA};
+use libsecp256k1::SecretKey;
 use relay_utils::{
 	initialize::initialize_relay,
 	metrics::{MetricsAddress, MetricsParams},
 };
-use secp256k1::SecretKey;
 use sp_core::crypto::Pair;
 use substrate_sync_loop::SubstrateSyncParams;
 
 use headers_relay::sync::HeadersSyncParams;
-use relay_ethereum_client::{ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams};
+use relay_ethereum_client::{
+	ConnectionParams as EthereumConnectionParams, SigningParams as EthereumSigningParams,
+};
 use relay_rialto_client::SigningParams as RialtoSigningParams;
 use relay_substrate_client::ConnectionParams as SubstrateConnectionParams;
 use std::sync::Arc;
@@ -64,123 +68,128 @@ async fn run_command(matches: &clap::ArgMatches<'_>) {
 				Ok(ethereum_sync_params) => ethereum_sync_params,
 				Err(err) => {
 					log::error!(target: "bridge", "Error parsing parameters: {}", err);
-					return;
-				}
+					return
+				},
 			})
 			.await
 			.is_err()
 			{
 				log::error!(target: "bridge", "Unable to get Substrate genesis block for Ethereum sync.");
 			};
-		}
+		},
 		("sub-to-eth", Some(sub_to_eth_matches)) => {
 			log::info!(target: "bridge", "Starting SUB âž¡ ETH relay.");
 			if substrate_sync_loop::run(match substrate_sync_params(sub_to_eth_matches) {
 				Ok(substrate_sync_params) => substrate_sync_params,
 				Err(err) => {
 					log::error!(target: "bridge", "Error parsing parameters: {}", err);
-					return;
-				}
+					return
+				},
 			})
 			.await
 			.is_err()
 			{
 				log::error!(target: "bridge", "Unable to get Substrate genesis block for Substrate sync.");
 			};
-		}
+		},
 		("eth-deploy-contract", Some(eth_deploy_matches)) => {
 			log::info!(target: "bridge", "Deploying ETH contracts.");
-			ethereum_deploy_contract::run(match ethereum_deploy_contract_params(eth_deploy_matches) {
-				Ok(ethereum_deploy_params) => ethereum_deploy_params,
-				Err(err) => {
-					log::error!(target: "bridge", "Error during contract deployment: {}", err);
-					return;
-				}
-			})
+			ethereum_deploy_contract::run(
+				match ethereum_deploy_contract_params(eth_deploy_matches) {
+					Ok(ethereum_deploy_params) => ethereum_deploy_params,
+					Err(err) => {
+						log::error!(target: "bridge", "Error during contract deployment: {}", err);
+						return
+					},
+				},
+			)
 			.await;
-		}
+		},
 		("eth-submit-exchange-tx", Some(eth_exchange_submit_matches)) => {
 			log::info!(target: "bridge", "Submitting ETH âž¡ SUB exchange transaction.");
-			ethereum_exchange_submit::run(match ethereum_exchange_submit_params(eth_exchange_submit_matches) {
-				Ok(eth_exchange_submit_params) => eth_exchange_submit_params,
-				Err(err) => {
-					log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err);
-					return;
-				}
-			})
+			ethereum_exchange_submit::run(
+				match ethereum_exchange_submit_params(eth_exchange_submit_matches) {
+					Ok(eth_exchange_submit_params) => eth_exchange_submit_params,
+					Err(err) => {
+						log::error!(target: "bridge", "Error submitting Eethereum exchange transaction: {}", err);
+						return
+					},
+				},
+			)
 			.await;
-		}
+		},
 		("eth-exchange-sub", Some(eth_exchange_matches)) => {
 			log::info!(target: "bridge", "Starting ETH âž¡ SUB exchange transactions relay.");
 			ethereum_exchange::run(match ethereum_exchange_params(eth_exchange_matches) {
 				Ok(eth_exchange_params) => eth_exchange_params,
 				Err(err) => {
 					log::error!(target: "bridge", "Error relaying Ethereum transactions proofs: {}", err);
-					return;
-				}
+					return
+				},
 			})
 			.await;
-		}
+		},
 		("", _) => {
 			log::error!(target: "bridge", "No subcommand specified");
-		}
+		},
 		_ => unreachable!("all possible subcommands are checked above; qed"),
 	}
 }
 
-fn ethereum_connection_params(matches: &clap::ArgMatches) -> Result<EthereumConnectionParams, String> {
+fn ethereum_connection_params(
+	matches: &clap::ArgMatches,
+) -> anyhow::Result<EthereumConnectionParams> {
 	let mut params = EthereumConnectionParams::default();
 	if let Some(eth_host) = matches.value_of("eth-host") {
 		params.host = eth_host.into();
 	}
 	if let Some(eth_port) = matches.value_of("eth-port") {
-		params.port = eth_port
-			.parse()
-			.map_err(|e| format!("Failed to parse eth-port: {}", e))?;
+		params.port = eth_port.parse().map_err(|e| anyhow!("Failed to parse eth-port: {}", e))?;
 	}
 	Ok(params)
 }
 
-fn ethereum_signing_params(matches: &clap::ArgMatches) -> Result<EthereumSigningParams, String> {
+fn ethereum_signing_params(matches: &clap::ArgMatches) -> anyhow::Result<EthereumSigningParams> {
 	let mut params = EthereumSigningParams::default();
 	if let Some(eth_signer) = matches.value_of("eth-signer") {
-		params.signer =
-			SecretKey::parse_slice(&hex::decode(eth_signer).map_err(|e| format!("Failed to parse eth-signer: {}", e))?)
-				.map_err(|e| format!("Invalid eth-signer: {}", e))?;
+		params.signer = SecretKey::parse_slice(
+			&hex::decode(eth_signer).map_err(|e| anyhow!("Failed to parse eth-signer: {}", e))?,
+		)
+		.map_err(|e| anyhow!("Invalid eth-signer: {}", e))?;
 	}
 	if let Some(eth_chain_id) = matches.value_of("eth-chain-id") {
 		params.chain_id = eth_chain_id
 			.parse::<u64>()
-			.map_err(|e| format!("Failed to parse eth-chain-id: {}", e))?;
+			.map_err(|e| anyhow!("Failed to parse eth-chain-id: {}", e))?;
 	}
 	Ok(params)
 }
 
-fn substrate_connection_params(matches: &clap::ArgMatches) -> Result<SubstrateConnectionParams, String> {
+fn substrate_connection_params(
+	matches: &clap::ArgMatches,
+) -> anyhow::Result<SubstrateConnectionParams> {
 	let mut params = SubstrateConnectionParams::default();
 	if let Some(sub_host) = matches.value_of("sub-host") {
 		params.host = sub_host.into();
 	}
 	if let Some(sub_port) = matches.value_of("sub-port") {
-		params.port = sub_port
-			.parse()
-			.map_err(|e| format!("Failed to parse sub-port: {}", e))?;
+		params.port = sub_port.parse().map_err(|e| anyhow!("Failed to parse sub-port: {}", e))?;
 	}
 	Ok(params)
 }
 
-fn rialto_signing_params(matches: &clap::ArgMatches) -> Result<RialtoSigningParams, String> {
+fn rialto_signing_params(matches: &clap::ArgMatches) -> anyhow::Result<RialtoSigningParams> {
 	let mut params = sp_keyring::AccountKeyring::Alice.pair();
 
 	if let Some(sub_signer) = matches.value_of("sub-signer") {
 		let sub_signer_password = matches.value_of("sub-signer-password");
 		params = sp_core::sr25519::Pair::from_string(sub_signer, sub_signer_password)
-			.map_err(|e| format!("Failed to parse sub-signer: {:?}", e))?;
+			.map_err(|e| anyhow!("Failed to parse sub-signer: {:?}", e))?;
 	}
 	Ok(params)
 }
 
-fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result<EthereumSyncParams, String> {
+fn ethereum_sync_params(matches: &clap::ArgMatches) -> anyhow::Result<EthereumSyncParams> {
 	use crate::ethereum_sync_loop::consts::*;
 
 	let mut sync_params = HeadersSyncParams {
@@ -199,9 +208,9 @@ fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result<EthereumSyncParams
 
 			// tx pool won't accept too much unsigned transactions
 			sync_params.max_headers_in_submitted_status = 10;
-		}
+		},
 		Some("backup") => sync_params.target_tx_mode = TargetTransactionMode::Backup,
-		Some(mode) => return Err(format!("Invalid sub-tx-mode: {}", mode)),
+		Some(mode) => return Err(anyhow!("Invalid sub-tx-mode: {}", mode)),
 		None => sync_params.target_tx_mode = TargetTransactionMode::Signed,
 	}
 
@@ -219,12 +228,12 @@ fn ethereum_sync_params(matches: &clap::ArgMatches) -> Result<EthereumSyncParams
 	Ok(params)
 }
 
-fn substrate_sync_params(matches: &clap::ArgMatches) -> Result<SubstrateSyncParams, String> {
+fn substrate_sync_params(matches: &clap::ArgMatches) -> anyhow::Result<SubstrateSyncParams> {
 	use crate::substrate_sync_loop::consts::*;
 
 	let eth_contract_address: relay_ethereum_client::types::Address =
 		if let Some(eth_contract) = matches.value_of("eth-contract") {
-			eth_contract.parse().map_err(|e| format!("{}", e))?
+			eth_contract.parse()?
 		} else {
 			"731a10897d267e19b34503ad902d0a29173ba4b1"
 				.parse()
@@ -252,15 +261,19 @@ fn substrate_sync_params(matches: &clap::ArgMatches) -> Result<SubstrateSyncPara
 	Ok(params)
 }
 
-fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result<EthereumDeployContractParams, String> {
-	let eth_contract_code = parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| {
-		hex::decode(include_str!("../res/substrate-bridge-bytecode.hex")).expect("code is hardcoded, thus valid; qed")
-	});
+fn ethereum_deploy_contract_params(
+	matches: &clap::ArgMatches,
+) -> anyhow::Result<EthereumDeployContractParams> {
+	let eth_contract_code =
+		parse_hex_argument(matches, "eth-contract-code")?.unwrap_or_else(|| {
+			hex::decode(include_str!("../res/substrate-bridge-bytecode.hex"))
+				.expect("code is hardcoded, thus valid; qed")
+		});
 	let sub_initial_authorities_set_id = matches
 		.value_of("sub-authorities-set-id")
 		.map(|set| {
 			set.parse()
-				.map_err(|e| format!("Failed to parse sub-authorities-set-id: {}", e))
+				.map_err(|e| anyhow!("Failed to parse sub-authorities-set-id: {}", e))
 		})
 		.transpose()?;
 	let sub_initial_authorities_set = parse_hex_argument(matches, "sub-authorities-set")?;
@@ -281,21 +294,21 @@ fn ethereum_deploy_contract_params(matches: &clap::ArgMatches) -> Result<Ethereu
 	Ok(params)
 }
 
-fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<EthereumExchangeSubmitParams, String> {
+fn ethereum_exchange_submit_params(
+	matches: &clap::ArgMatches,
+) -> anyhow::Result<EthereumExchangeSubmitParams> {
 	let eth_nonce = matches
 		.value_of("eth-nonce")
 		.map(|eth_nonce| {
 			relay_ethereum_client::types::U256::from_dec_str(eth_nonce)
-				.map_err(|e| format!("Failed to parse eth-nonce: {}", e))
+				.map_err(|e| anyhow!("Failed to parse eth-nonce: {}", e))
 		})
 		.transpose()?;
 
 	let eth_amount = matches
 		.value_of("eth-amount")
 		.map(|eth_amount| {
-			eth_amount
-				.parse()
-				.map_err(|e| format!("Failed to parse eth-amount: {}", e))
+			eth_amount.parse().map_err(|e| anyhow!("Failed to parse eth-amount: {}", e))
 		})
 		.transpose()?
 		.unwrap_or_else(|| {
@@ -304,7 +317,8 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<Ethereu
 		});
 
 	// This is the well-known Substrate account of Ferdie
-	let default_recepient = hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c");
+	let default_recepient =
+		hex!("1cbd2d43530a44705ad088af313e18f80b53ef16b36177cd4b77b846f2a5f07c");
 
 	let sub_recipient = if let Some(sub_recipient) = matches.value_of("sub-recipient") {
 		hex::decode(&sub_recipient)
@@ -319,7 +333,7 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<Ethereu
 					Ok(sub_recipient)
 				}
 			})
-			.map_err(|e| format!("Failed to parse sub-recipient: {}", e))?
+			.map_err(|e| anyhow!("Failed to parse sub-recipient: {}", e))?
 	} else {
 		default_recepient
 	};
@@ -337,12 +351,10 @@ fn ethereum_exchange_submit_params(matches: &clap::ArgMatches) -> Result<Ethereu
 	Ok(params)
 }
 
-fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result<EthereumExchangeParams, String> {
+fn ethereum_exchange_params(matches: &clap::ArgMatches) -> anyhow::Result<EthereumExchangeParams> {
 	let mode = match matches.value_of("eth-tx-hash") {
 		Some(eth_tx_hash) => ethereum_exchange::ExchangeRelayMode::Single(
-			eth_tx_hash
-				.parse()
-				.map_err(|e| format!("Failed to parse eth-tx-hash: {}", e))?,
+			eth_tx_hash.parse().map_err(|e| anyhow!("Failed to parse eth-tx-hash: {}", e))?,
 		),
 		None => ethereum_exchange::ExchangeRelayMode::Auto(
 			matches
@@ -350,7 +362,7 @@ fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result<EthereumExchan
 				.map(|eth_start_with_block| {
 					eth_start_with_block
 						.parse()
-						.map_err(|e| format!("Failed to parse eth-start-with-block: {}", e))
+						.map_err(|e| anyhow!("Failed to parse eth-start-with-block: {}", e))
 				})
 				.transpose()?,
 		),
@@ -370,9 +382,9 @@ fn ethereum_exchange_params(matches: &clap::ArgMatches) -> Result<EthereumExchan
 	Ok(params)
 }
 
-fn metrics_params(matches: &clap::ArgMatches) -> Result<MetricsParams, String> {
+fn metrics_params(matches: &clap::ArgMatches) -> anyhow::Result<MetricsParams> {
 	if matches.is_present("no-prometheus") {
-		return Ok(None.into());
+		return Ok(None.into())
 	}
 
 	let mut metrics_params = MetricsAddress::default();
@@ -383,18 +395,18 @@ fn metrics_params(matches: &clap::ArgMatches) -> Result<MetricsParams, String> {
 	if let Some(prometheus_port) = matches.value_of("prometheus-port") {
 		metrics_params.port = prometheus_port
 			.parse()
-			.map_err(|e| format!("Failed to parse prometheus-port: {}", e))?;
+			.map_err(|e| anyhow!("Failed to parse prometheus-port: {}", e))?;
 	}
 
 	Ok(Some(metrics_params).into())
 }
 
-fn instance_params(matches: &clap::ArgMatches) -> Result<Arc<dyn BridgeInstance>, String> {
+fn instance_params(matches: &clap::ArgMatches) -> anyhow::Result<Arc<dyn BridgeInstance>> {
 	let instance = if let Some(instance) = matches.value_of("sub-pallet-instance") {
 		match instance.to_lowercase().as_str() {
 			"rialto" => Arc::new(RialtoPoA) as Arc<dyn BridgeInstance>,
 			"kovan" => Arc::new(Kovan),
-			_ => return Err("Unsupported bridge pallet instance".to_string()),
+			_ => return Err(anyhow!("Unsupported bridge pallet instance")),
 		}
 	} else {
 		unreachable!("CLI config enforces a default instance, can never be None")
@@ -403,11 +415,10 @@ fn instance_params(matches: &clap::ArgMatches) -> Result<Arc<dyn BridgeInstance>
 	Ok(instance)
 }
 
-fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> Result<Option<Vec<u8>>, String> {
+fn parse_hex_argument(matches: &clap::ArgMatches, arg: &str) -> anyhow::Result<Option<Vec<u8>>> {
 	match matches.value_of(arg) {
-		Some(value) => Ok(Some(
-			hex::decode(value).map_err(|e| format!("Failed to parse {}: {}", arg, e))?,
-		)),
+		Some(value) =>
+			Ok(Some(hex::decode(value).map_err(|e| anyhow!("Failed to parse {}: {}", arg, e))?)),
 		None => Ok(None),
 	}
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs b/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs
index d9c0f265cbb95715ec8f2375e69b2de4eb6666ee..1dadf9f7ddff5a69b650236732566fcb68a26cd3 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/rialto_client.rs
@@ -14,9 +14,9 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::ethereum_sync_loop::QueuedEthereumHeader;
-use crate::instances::BridgeInstance;
-use crate::rpc_errors::RpcError;
+use crate::{
+	ethereum_sync_loop::QueuedEthereumHeader, instances::BridgeInstance, rpc_errors::RpcError,
+};
 
 use async_trait::async_trait;
 use bp_eth_poa::AuraHeader as SubstrateEthereumHeader;
@@ -24,7 +24,9 @@ use codec::{Decode, Encode};
 use headers_relay::sync_types::SubmittedHeaders;
 use relay_ethereum_client::types::HeaderId as EthereumHeaderId;
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
-use relay_substrate_client::{Client as SubstrateClient, TransactionSignScheme};
+use relay_substrate_client::{
+	Client as SubstrateClient, TransactionSignScheme, UnsignedTransaction,
+};
 use relay_utils::HeaderId;
 use sp_core::{crypto::Pair, Bytes};
 use std::{collections::VecDeque, sync::Arc};
@@ -33,7 +35,8 @@ const ETH_API_IMPORT_REQUIRES_RECEIPTS: &str = "RialtoPoAHeaderApi_is_import_req
 const ETH_API_IS_KNOWN_BLOCK: &str = "RialtoPoAHeaderApi_is_known_block";
 const ETH_API_BEST_BLOCK: &str = "RialtoPoAHeaderApi_best_block";
 const ETH_API_BEST_FINALIZED_BLOCK: &str = "RialtoPoAHeaderApi_finalized_block";
-const EXCH_API_FILTER_TRANSACTION_PROOF: &str = "RialtoCurrencyExchangeApi_filter_transaction_proof";
+const EXCH_API_FILTER_TRANSACTION_PROOF: &str =
+	"RialtoCurrencyExchangeApi_filter_transaction_proof";
 
 type RpcResult<T> = std::result::Result<T, RpcError>;
 
@@ -41,13 +44,13 @@ type RpcResult<T> = std::result::Result<T, RpcError>;
 /// interactions involving, for example, an Ethereum bridge module.
 #[async_trait]
 pub trait SubstrateHighLevelRpc {
-	/// Returns best Ethereum block that Substrate runtime knows of.
+	/// Returns the best Ethereum block that Substrate runtime knows of.
 	async fn best_ethereum_block(&self) -> RpcResult<EthereumHeaderId>;
 	/// Returns best finalized Ethereum block that Substrate runtime knows of.
 	async fn best_ethereum_finalized_block(&self) -> RpcResult<EthereumHeaderId>;
-	/// Returns whether or not transactions receipts are required for Ethereum header submission.
+	/// Returns whether transactions receipts are required for Ethereum header submission.
 	async fn ethereum_receipts_required(&self, header: SubstrateEthereumHeader) -> RpcResult<bool>;
-	/// Returns whether or not the given Ethereum header is known to the Substrate runtime.
+	/// Returns whether the given Ethereum header is known to the Substrate runtime.
 	async fn ethereum_header_known(&self, header_id: EthereumHeaderId) -> RpcResult<bool>;
 }
 
@@ -58,7 +61,8 @@ impl SubstrateHighLevelRpc for SubstrateClient<Rialto> {
 		let data = Bytes(Vec::new());
 
 		let encoded_response = self.state_call(call, data, None).await?;
-		let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?;
+		let decoded_response: (u64, bp_eth_poa::H256) =
+			Decode::decode(&mut &encoded_response.0[..])?;
 
 		let best_header_id = HeaderId(decoded_response.0, decoded_response.1);
 		Ok(best_header_id)
@@ -69,7 +73,8 @@ impl SubstrateHighLevelRpc for SubstrateClient<Rialto> {
 		let data = Bytes(Vec::new());
 
 		let encoded_response = self.state_call(call, data, None).await?;
-		let decoded_response: (u64, bp_eth_poa::H256) = Decode::decode(&mut &encoded_response.0[..])?;
+		let decoded_response: (u64, bp_eth_poa::H256) =
+			Decode::decode(&mut &encoded_response.0[..])?;
 
 		let best_header_id = HeaderId(decoded_response.0, decoded_response.1);
 		Ok(best_header_id)
@@ -155,18 +160,25 @@ impl SubmitEthereumHeaders for SubstrateClient<Rialto> {
 		headers: Vec<QueuedEthereumHeader>,
 	) -> SubmittedHeaders<EthereumHeaderId, RpcError> {
 		let ids = headers.iter().map(|header| header.id()).collect();
+		let genesis_hash = *self.genesis_hash();
 		let submission_result = async {
-			self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| {
-				Bytes(
-					Rialto::sign_transaction(
-						*self.genesis_hash(),
-						&params,
-						transaction_nonce,
-						instance.build_signed_header_call(headers),
+			self.submit_signed_extrinsic(
+				(*params.public().as_array_ref()).into(),
+				move |_, transaction_nonce| {
+					Bytes(
+						Rialto::sign_transaction(
+							genesis_hash,
+							&params,
+							relay_substrate_client::TransactionEra::immortal(),
+							UnsignedTransaction::new(
+								instance.build_signed_header_call(headers),
+								transaction_nonce,
+							),
+						)
+						.encode(),
 					)
-					.encode(),
-				)
-			})
+				},
+			)
 			.await?;
 			Ok(())
 		}
@@ -208,8 +220,8 @@ impl SubmitEthereumHeaders for SubstrateClient<Rialto> {
 					submitted_headers.rejected.push(id);
 					submitted_headers.rejected.extend(ids);
 					submitted_headers.fatal_error = Some(error.into());
-					break;
-				}
+					break
+				},
 			}
 		}
 
@@ -257,23 +269,32 @@ impl SubmitEthereumExchangeTransactionProof for SubstrateClient<Rialto> {
 		instance: Arc<dyn BridgeInstance>,
 		proof: rialto_runtime::exchange::EthereumTransactionInclusionProof,
 	) -> RpcResult<()> {
-		self.submit_signed_extrinsic((*params.public().as_array_ref()).into(), |transaction_nonce| {
-			Bytes(
-				Rialto::sign_transaction(
-					*self.genesis_hash(),
-					&params,
-					transaction_nonce,
-					instance.build_currency_exchange_call(proof),
+		let genesis_hash = *self.genesis_hash();
+		self.submit_signed_extrinsic(
+			(*params.public().as_array_ref()).into(),
+			move |_, transaction_nonce| {
+				Bytes(
+					Rialto::sign_transaction(
+						genesis_hash,
+						&params,
+						relay_substrate_client::TransactionEra::immortal(),
+						UnsignedTransaction::new(
+							instance.build_currency_exchange_call(proof),
+							transaction_nonce,
+						),
+					)
+					.encode(),
 				)
-				.encode(),
-			)
-		})
+			},
+		)
 		.await?;
 		Ok(())
 	}
 }
 
 /// Create unsigned Substrate transaction for submitting Ethereum header.
-fn create_unsigned_submit_transaction(call: rialto_runtime::Call) -> rialto_runtime::UncheckedExtrinsic {
+fn create_unsigned_submit_transaction(
+	call: rialto_runtime::Call,
+) -> rialto_runtime::UncheckedExtrinsic {
 	rialto_runtime::UncheckedExtrinsic::new_unsigned(call)
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs b/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs
index 27b233135f325388c650065e9f6cc2dc028ab84c..e91bc363839b6b47d07526def743caf37564c7d9 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/rpc_errors.rs
@@ -17,48 +17,30 @@
 use relay_ethereum_client::Error as EthereumNodeError;
 use relay_substrate_client::Error as SubstrateNodeError;
 use relay_utils::MaybeConnectionError;
+use thiserror::Error;
 
 /// Contains common errors that can occur when
 /// interacting with a Substrate or Ethereum node
 /// through RPC.
-#[derive(Debug)]
+#[derive(Debug, Error)]
 pub enum RpcError {
 	/// The arguments to the RPC method failed to serialize.
-	Serialization(serde_json::Error),
-	/// An error occured when interacting with an Ethereum node.
-	Ethereum(EthereumNodeError),
-	/// An error occured when interacting with a Substrate node.
-	Substrate(SubstrateNodeError),
+	#[error("RPC arguments serialization failed: {0}")]
+	Serialization(#[from] serde_json::Error),
+	/// An error occurred when interacting with an Ethereum node.
+	#[error("Ethereum node error: {0}")]
+	Ethereum(#[from] EthereumNodeError),
+	/// An error occurred when interacting with a Substrate node.
+	#[error("Substrate node error: {0}")]
+	Substrate(#[from] SubstrateNodeError),
 	/// Error running relay loop.
+	#[error("{0}")]
 	SyncLoop(String),
 }
 
 impl From<RpcError> for String {
 	fn from(err: RpcError) -> Self {
-		match err {
-			RpcError::Serialization(e) => e.to_string(),
-			RpcError::Ethereum(e) => e.to_string(),
-			RpcError::Substrate(e) => e.to_string(),
-			RpcError::SyncLoop(e) => e,
-		}
-	}
-}
-
-impl From<serde_json::Error> for RpcError {
-	fn from(err: serde_json::Error) -> Self {
-		Self::Serialization(err)
-	}
-}
-
-impl From<EthereumNodeError> for RpcError {
-	fn from(err: EthereumNodeError) -> Self {
-		Self::Ethereum(err)
-	}
-}
-
-impl From<SubstrateNodeError> for RpcError {
-	fn from(err: SubstrateNodeError) -> Self {
-		Self::Substrate(err)
+		format!("{}", err)
 	}
 }
 
diff --git a/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs b/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
index 542fd41f72732632426a98062d00dabcd7cbddd1..4b5bd4fa7326c695bc91faa11e3e5b8831a6970a 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/substrate_sync_loop.rs
@@ -16,8 +16,7 @@
 
 //! Rialto-Substrate -> Ethereum PoA synchronization.
 
-use crate::ethereum_client::EthereumHighLevelRpc;
-use crate::rpc_errors::RpcError;
+use crate::{ethereum_client::EthereumHighLevelRpc, rpc_errors::RpcError};
 
 use async_trait::async_trait;
 use codec::Encode;
@@ -38,8 +37,7 @@ use relay_substrate_client::{
 use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient};
 use sp_runtime::EncodedJustification;
 
-use std::fmt::Debug;
-use std::{collections::HashSet, time::Duration};
+use std::{collections::HashSet, fmt::Debug, time::Duration};
 
 pub mod consts {
 	use super::*;
@@ -50,7 +48,8 @@ pub mod consts {
 	pub const MAX_FUTURE_HEADERS_TO_DOWNLOAD: usize = 8;
 	/// Max Ethereum headers count we want to have in 'submitted' state.
 	pub const MAX_SUBMITTED_HEADERS: usize = 4;
-	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten (pruned).
+	/// Max depth of in-memory headers in all states. Past this depth they will be forgotten
+	/// (pruned).
 	pub const PRUNE_DEPTH: u32 = 256;
 }
 
@@ -110,11 +109,7 @@ struct EthereumHeadersTarget {
 
 impl EthereumHeadersTarget {
 	fn new(client: EthereumClient, contract: Address, sign_params: EthereumSigningParams) -> Self {
-		Self {
-			client,
-			contract,
-			sign_params,
-		}
+		Self { client, contract, sign_params }
 	}
 }
 
@@ -137,11 +132,17 @@ impl TargetClient<SubstrateHeadersSyncPipeline> for EthereumHeadersTarget {
 		self.client.best_substrate_block(self.contract).await
 	}
 
-	async fn is_known_header(&self, id: RialtoHeaderId) -> Result<(RialtoHeaderId, bool), RpcError> {
+	async fn is_known_header(
+		&self,
+		id: RialtoHeaderId,
+	) -> Result<(RialtoHeaderId, bool), RpcError> {
 		self.client.substrate_header_known(self.contract, id).await
 	}
 
-	async fn submit_headers(&self, headers: Vec<QueuedRialtoHeader>) -> SubmittedHeaders<RialtoHeaderId, RpcError> {
+	async fn submit_headers(
+		&self,
+		headers: Vec<QueuedRialtoHeader>,
+	) -> SubmittedHeaders<RialtoHeaderId, RpcError> {
 		self.client
 			.submit_substrate_headers(self.sign_params.clone(), self.contract, headers)
 			.await
@@ -161,7 +162,10 @@ impl TargetClient<SubstrateHeadersSyncPipeline> for EthereumHeadersTarget {
 			.await
 	}
 
-	async fn requires_extra(&self, header: QueuedRialtoHeader) -> Result<(RialtoHeaderId, bool), RpcError> {
+	async fn requires_extra(
+		&self,
+		header: QueuedRialtoHeader,
+	) -> Result<(RialtoHeaderId, bool), RpcError> {
 		Ok((header.header().id(), false))
 	}
 }
@@ -194,7 +198,7 @@ pub async fn run(params: SubstrateSyncParams) -> Result<(), RpcError> {
 		futures::future::pending(),
 	)
 	.await
-	.map_err(RpcError::SyncLoop)?;
+	.map_err(|e| RpcError::SyncLoop(e.to_string()))?;
 
 	Ok(())
 }
diff --git a/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs b/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs
index af68d7e0285557d4fc0edfd753dd1771c6a49e69..f9e6c29c6a65022f52cc6c8d35bf3fd29e5941b2 100644
--- a/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs
+++ b/polkadot/bridges/relays/bin-ethereum/src/substrate_types.rs
@@ -17,11 +17,12 @@
 //! Converting between Ethereum headers and bridge module types.
 
 use bp_eth_poa::{
-	AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry, Receipt as SubstrateEthereumReceipt,
-	TransactionOutcome as SubstrateEthereumTransactionOutcome,
+	AuraHeader as SubstrateEthereumHeader, LogEntry as SubstrateEthereumLogEntry,
+	Receipt as SubstrateEthereumReceipt, TransactionOutcome as SubstrateEthereumTransactionOutcome,
 };
 use relay_ethereum_client::types::{
-	Header as EthereumHeader, Receipt as EthereumReceipt, HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF,
+	Header as EthereumHeader, Receipt as EthereumReceipt,
+	HEADER_ID_PROOF as ETHEREUM_HEADER_ID_PROOF,
 };
 
 /// Convert Ethereum header into Ethereum header for Substrate.
@@ -68,7 +69,8 @@ pub fn into_substrate_ethereum_receipt(receipt: &EthereumReceipt) -> SubstrateEt
 			})
 			.collect(),
 		outcome: match (receipt.status, receipt.root) {
-			(Some(status), None) => SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8),
+			(Some(status), None) =>
+				SubstrateEthereumTransactionOutcome::StatusCode(status.as_u64() as u8),
 			(None, Some(root)) => SubstrateEthereumTransactionOutcome::StateRoot(root),
 			_ => SubstrateEthereumTransactionOutcome::Unknown,
 		},
diff --git a/polkadot/bridges/relays/bin-substrate/Cargo.toml b/polkadot/bridges/relays/bin-substrate/Cargo.toml
index 99f56cc3be483cb91c6708a0effeb50eee64871e..a28c61262f403d1eb6b94b7dcafaba14c72d1c6e 100644
--- a/polkadot/bridges/relays/bin-substrate/Cargo.toml
+++ b/polkadot/bridges/relays/bin-substrate/Cargo.toml
@@ -8,15 +8,16 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 [dependencies]
 anyhow = "1.0"
 async-std = "1.9.0"
-async-trait = "0.1.42"
-codec = { package = "parity-scale-codec", version = "2.0.0" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 futures = "0.3.12"
 hex = "0.4"
 log = "0.4.14"
 num-format = "0.4"
 num-traits = "0.2"
 paste = "1.0"
+rand = "0.8"
 structopt = "0.3"
+strum = { version = "0.21.0", features = ["derive"] }
 
 # Bridge dependencies
 
@@ -27,38 +28,52 @@ bp-message-dispatch = { path = "../../primitives/message-dispatch" }
 bp-millau = { path = "../../primitives/chain-millau" }
 bp-polkadot = { path = "../../primitives/chain-polkadot" }
 bp-rialto = { path = "../../primitives/chain-rialto" }
+bp-rialto-parachain = { path = "../../primitives/chain-rialto-parachain" }
 bp-rococo = { path = "../../primitives/chain-rococo" }
+bp-token-swap = { path = "../../primitives/token-swap" }
 bp-wococo = { path = "../../primitives/chain-wococo" }
 bp-runtime = { path = "../../primitives/runtime" }
 bp-westend = { path = "../../primitives/chain-westend" }
 bridge-runtime-common = { path = "../../bin/runtime-common" }
-finality-grandpa = "0.14.1"
 finality-relay = { path = "../finality" }
-headers-relay = { path = "../headers" }
 messages-relay = { path = "../messages" }
 millau-runtime = { path = "../../bin/millau/runtime" }
+pallet-bridge-dispatch = { path = "../../modules/dispatch" }
 pallet-bridge-messages = { path = "../../modules/messages" }
+pallet-bridge-token-swap = { path = "../../modules/token-swap" }
 relay-kusama-client = { path = "../client-kusama" }
 relay-millau-client = { path = "../client-millau" }
 relay-polkadot-client = { path = "../client-polkadot" }
 relay-rialto-client = { path = "../client-rialto" }
+relay-rialto-parachain-client = { path = "../client-rialto-parachain" }
 relay-rococo-client = { path = "../client-rococo" }
 relay-wococo-client = { path = "../client-wococo" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
 relay-westend-client = { path = "../client-westend" }
+rialto-parachain-runtime = { path = "../../bin/rialto-parachain/runtime" }
 rialto-runtime = { path = "../../bin/rialto/runtime" }
+substrate-relay-helper = { path = "../lib-substrate-relay" }
 
 # Substrate Dependencies
 
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" }
 
+# Polkadot Dependencies
+
+polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master" }
+
 [dev-dependencies]
 hex-literal = "0.3"
 pallet-bridge-grandpa = { path = "../../modules/grandpa" }
 sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
+tempfile = "3.2"
+finality-grandpa = { version = "0.14.0" }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9e5351672dada7d765dc7ed1dd68012894b5a71f
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs
@@ -0,0 +1,103 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use codec::Decode;
+use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight};
+use relay_kusama_client::Kusama;
+use sp_version::RuntimeVersion;
+
+use crate::cli::{
+	bridge,
+	encode_call::{Call, CliEncodeCall},
+	encode_message, CliChain,
+};
+
+/// Weight of the `system::remark` call at Kusama.
+///
+/// This weight is larger (x2) than actual weight at current Kusama runtime to avoid unsuccessful
+/// calls in the future. But since it is used only in tests (and on test chains), this is ok.
+pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000;
+
+/// Id of Kusama token that is used to fetch token price.
+pub(crate) const TOKEN_ID: &str = "kusama";
+
+impl CliEncodeCall for Kusama {
+	fn max_extrinsic_size() -> u32 {
+		bp_kusama::max_extrinsic_size()
+	}
+
+	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
+		Ok(match call {
+			Call::Remark { remark_payload, .. } => relay_kusama_client::runtime::Call::System(
+				relay_kusama_client::runtime::SystemCall::remark(
+					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				),
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::KUSAMA_TO_POLKADOT_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_kusama_client::runtime::Call::BridgePolkadotMessages(
+							relay_kusama_client::runtime::BridgePolkadotMessagesCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
+			_ => anyhow::bail!("Unsupported Kusama call: {:?}", call),
+		})
+	}
+
+	fn get_dispatch_info(
+		call: &relay_kusama_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
+		match *call {
+			relay_kusama_client::runtime::Call::System(
+				relay_kusama_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: crate::chains::kusama::SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
+			_ => anyhow::bail!("Unsupported Kusama call: {:?}", call),
+		}
+	}
+}
+
+impl CliChain for Kusama {
+	const RUNTIME_VERSION: RuntimeVersion = bp_kusama::VERSION;
+
+	type KeyPair = sp_core::sr25519::Pair;
+	type MessagePayload = ();
+
+	fn ss58_format() -> u16 {
+		42
+	}
+
+	fn max_extrinsic_weight() -> Weight {
+		bp_kusama::max_extrinsic_weight()
+	}
+
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
+		anyhow::bail!("Sending messages from Kusama is not yet supported.")
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ea362bd202b1d73f763ab5e6e44a20b70dfc4b0a
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs
@@ -0,0 +1,171 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Kusama-to-Polkadot headers sync entrypoint.
+
+use codec::Encode;
+use sp_core::{Bytes, Pair};
+
+use bp_header_chain::justification::GrandpaJustification;
+use relay_kusama_client::{Kusama, SyncHeader as KusamaSyncHeader};
+use relay_polkadot_client::{Polkadot, SigningParams as PolkadotSigningParams};
+use relay_substrate_client::{Client, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
+
+/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat
+/// relay as gone wild.
+///
+/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 21
+/// DOT, but let's round up to 30 DOT here.
+pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 30_000_000_000;
+
+/// Kusama-to-Polkadot finality sync pipeline.
+pub(crate) type FinalityPipelineKusamaFinalityToPolkadot =
+	SubstrateFinalityToSubstrate<Kusama, Polkadot, PolkadotSigningParams>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct KusamaFinalityToPolkadot {
+	finality_pipeline: FinalityPipelineKusamaFinalityToPolkadot,
+}
+
+impl KusamaFinalityToPolkadot {
+	pub fn new(target_client: Client<Polkadot>, target_sign: PolkadotSigningParams) -> Self {
+		Self {
+			finality_pipeline: FinalityPipelineKusamaFinalityToPolkadot::new(
+				target_client,
+				target_sign,
+			),
+		}
+	}
+}
+
+impl SubstrateFinalitySyncPipeline for KusamaFinalityToPolkadot {
+	type FinalitySyncPipeline = FinalityPipelineKusamaFinalityToPolkadot;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
+
+	type TargetChain = Polkadot;
+
+	fn customize_metrics(params: MetricsParams) -> anyhow::Result<MetricsParams> {
+		crate::chains::add_polkadot_kusama_price_metrics::<Self::FinalitySyncPipeline>(
+			Some(finality_relay::metrics_prefix::<Self::FinalitySyncPipeline>()),
+			params,
+		)
+	}
+
+	fn start_relay_guards(&self) {
+		relay_substrate_client::guard::abort_on_spec_version_change(
+			self.finality_pipeline.target_client.clone(),
+			bp_polkadot::VERSION.spec_version,
+		);
+		relay_substrate_client::guard::abort_when_account_balance_decreased(
+			self.finality_pipeline.target_client.clone(),
+			self.transactions_author(),
+			MAXIMAL_BALANCE_DECREASE_PER_DAY,
+		);
+	}
+
+	fn transactions_author(&self) -> bp_polkadot::AccountId {
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
+	}
+
+	fn make_submit_finality_proof_transaction(
+		&self,
+		era: bp_runtime::TransactionEraOf<Polkadot>,
+		transaction_nonce: bp_runtime::IndexOf<Polkadot>,
+		header: KusamaSyncHeader,
+		proof: GrandpaJustification<bp_kusama::Header>,
+	) -> Bytes {
+		let call = relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa(
+			relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::submit_finality_proof(
+				Box::new(header.into_inner()),
+				proof,
+			),
+		);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Polkadot::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
+
+		Bytes(transaction.encode())
+	}
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+	use super::*;
+	use frame_support::weights::WeightToFeePolynomial;
+	use pallet_bridge_grandpa::weights::WeightInfo;
+
+	pub fn compute_maximal_balance_decrease_per_day<B, W>(expected_source_headers_per_day: u32) -> B
+	where
+		B: From<u32> + std::ops::Mul<Output = B>,
+		W: WeightToFeePolynomial<Balance = B>,
+	{
+		// we assume that the GRANDPA is not lagging here => ancestry length will be near to 0
+		// (let's round up to 2)
+		const AVG_VOTES_ANCESTRIES_LEN: u32 = 2;
+		// let's assume number of validators is 1024 (more than on any existing well-known chain
+		// atm) => number of precommits is *2/3 + 1
+		const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1;
+
+		// GRANDPA pallet weights. We're now using Rialto weights everywhere.
+		//
+		// Using Rialto runtime is slightly incorrect, because `DbWeight` of other runtimes may
+		// differ from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is
+		// the same.
+		type GrandpaPalletWeights =
+			pallet_bridge_grandpa::weights::RialtoWeight<rialto_runtime::Runtime>;
+
+		// The following formula shall not be treated as super-accurate - guard is to protect from
+		// mad relays, not to protect from over-average loses.
+
+		// increase number of headers a bit
+		let expected_source_headers_per_day = expected_source_headers_per_day * 110 / 100;
+		let single_source_header_submit_call_weight = GrandpaPalletWeights::submit_finality_proof(
+			AVG_VOTES_ANCESTRIES_LEN,
+			AVG_PRECOMMITS_LEN,
+		);
+		// for simplicity - add extra weight for base tx fee + fee that is paid for the tx size +
+		// adjusted fee
+		let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2;
+		let single_source_header_tx_cost = W::calc(&single_source_header_submit_tx_weight);
+		single_source_header_tx_cost * B::from(expected_source_headers_per_day)
+	}
+
+	#[test]
+	fn maximal_balance_decrease_per_day_is_sane() {
+		// we expect Kusama -> Polkadot relay to be running in mandatory-headers-only mode
+		// => we expect single header for every Kusama session
+		let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::<
+			bp_polkadot::Balance,
+			bp_polkadot::WeightToFee,
+		>(bp_kusama::DAYS / bp_kusama::SESSION_LENGTH + 1);
+		assert!(
+			MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease,
+			"Maximal expected loss per day {} is larger than hardcoded {}",
+			maximal_balance_decrease,
+			MAXIMAL_BALANCE_DECREASE_PER_DAY,
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs
new file mode 100644
index 0000000000000000000000000000000000000000..9c9dee150dc46b2beaaf61cbd9cffa4a1fa21591
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs
@@ -0,0 +1,345 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Kusama-to-Polkadot messages sync entrypoint.
+
+use std::ops::RangeInclusive;
+
+use codec::Encode;
+use frame_support::weights::Weight;
+use sp_core::{Bytes, Pair};
+use sp_runtime::{FixedPointNumber, FixedU128};
+
+use bp_messages::MessageNonce;
+use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy};
+use relay_kusama_client::{
+	HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams,
+};
+use relay_polkadot_client::{
+	HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams,
+};
+use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
+	STALL_TIMEOUT,
+};
+
+/// Kusama-to-Polkadot message lane.
+pub type MessageLaneKusamaMessagesToPolkadot =
+	SubstrateMessageLaneToSubstrate<Kusama, KusamaSigningParams, Polkadot, PolkadotSigningParams>;
+
+#[derive(Clone)]
+pub struct KusamaMessagesToPolkadot {
+	message_lane: MessageLaneKusamaMessagesToPolkadot,
+}
+
+impl SubstrateMessageLane for KusamaMessagesToPolkadot {
+	type MessageLane = MessageLaneKusamaMessagesToPolkadot;
+
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_polkadot::TO_POLKADOT_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
+		bp_polkadot::TO_POLKADOT_LATEST_GENERATED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_polkadot::TO_POLKADOT_LATEST_RECEIVED_NONCE_METHOD;
+
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_kusama::FROM_KUSAMA_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
+		bp_kusama::FROM_KUSAMA_LATEST_CONFIRMED_NONCE_METHOD;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_kusama::FROM_KUSAMA_UNREWARDED_RELAYERS_STATE;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
+
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str =
+		bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str =
+		bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME;
+
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_polkadot::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+
+	type SourceChain = Kusama;
+	type TargetChain = Polkadot;
+
+	fn source_transactions_author(&self) -> bp_kusama::AccountId {
+		(*self.message_lane.source_sign.public().as_array_ref()).into()
+	}
+
+	fn make_messages_receiving_proof_transaction(
+		&self,
+		best_block_id: KusamaHeaderId,
+		transaction_nonce: bp_runtime::IndexOf<Kusama>,
+		_generated_at_block: PolkadotHeaderId,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
+	) -> Bytes {
+		let (relayers_state, proof) = proof;
+		let call = relay_kusama_client::runtime::Call::BridgePolkadotMessages(
+			relay_kusama_client::runtime::BridgePolkadotMessagesCall::receive_messages_delivery_proof(
+				proof,
+				relayers_state,
+			),
+		);
+		let genesis_hash = *self.message_lane.source_client.genesis_hash();
+		let transaction = Kusama::sign_transaction(
+			genesis_hash,
+			&self.message_lane.source_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.source_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
+		log::trace!(
+			target: "bridge",
+			"Prepared Polkadot -> Kusama confirmation transaction. Weight: <unknown>/{}, size: {}/{}",
+			bp_kusama::max_extrinsic_weight(),
+			transaction.encode().len(),
+			bp_kusama::max_extrinsic_size(),
+		);
+		Bytes(transaction.encode())
+	}
+
+	fn target_transactions_author(&self) -> bp_polkadot::AccountId {
+		(*self.message_lane.target_sign.public().as_array_ref()).into()
+	}
+
+	fn make_messages_delivery_transaction(
+		&self,
+		best_block_id: PolkadotHeaderId,
+		transaction_nonce: bp_runtime::IndexOf<Polkadot>,
+		_generated_at_header: KusamaHeaderId,
+		_nonces: RangeInclusive<MessageNonce>,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
+	) -> Bytes {
+		let (dispatch_weight, proof) = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
+		let messages_count = nonces_end - nonces_start + 1;
+
+		let call = relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
+			relay_polkadot_client::runtime::BridgeKusamaMessagesCall::receive_messages_proof(
+				self.message_lane.relayer_id_at_source.clone(),
+				proof,
+				messages_count as _,
+				dispatch_weight,
+			),
+		);
+		let genesis_hash = *self.message_lane.target_client.genesis_hash();
+		let transaction = Polkadot::sign_transaction(
+			genesis_hash,
+			&self.message_lane.target_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.target_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
+		log::trace!(
+			target: "bridge",
+			"Prepared Kusama -> Polkadot delivery transaction. Weight: <unknown>/{}, size: {}/{}",
+			bp_polkadot::max_extrinsic_weight(),
+			transaction.encode().len(),
+			bp_polkadot::max_extrinsic_size(),
+		);
+		Bytes(transaction.encode())
+	}
+}
+
+/// Kusama node as messages source.
+type KusamaSourceClient = SubstrateMessagesSource<KusamaMessagesToPolkadot>;
+
+/// Polkadot node as messages target.
+type PolkadotTargetClient = SubstrateMessagesTarget<KusamaMessagesToPolkadot>;
+
+/// Run Kusama-to-Polkadot messages sync.
+pub async fn run(
+	params: MessagesRelayParams<
+		Kusama,
+		KusamaSigningParams,
+		Polkadot,
+		PolkadotSigningParams,
+		MixStrategy,
+	>,
+) -> anyhow::Result<()> {
+	let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout(
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		Kusama::AVERAGE_BLOCK_INTERVAL,
+		Polkadot::AVERAGE_BLOCK_INTERVAL,
+		STALL_TIMEOUT,
+	);
+	let relayer_id_at_kusama = (*params.source_sign.public().as_array_ref()).into();
+
+	let lane_id = params.lane_id;
+	let source_client = params.source_client;
+	let lane = KusamaMessagesToPolkadot {
+		message_lane: SubstrateMessageLaneToSubstrate {
+			source_client: source_client.clone(),
+			source_sign: params.source_sign,
+			source_transactions_mortality: params.source_transactions_mortality,
+			target_client: params.target_client.clone(),
+			target_sign: params.target_sign,
+			target_transactions_mortality: params.target_transactions_mortality,
+			relayer_id_at_source: relayer_id_at_kusama,
+		},
+	};
+
+	// 2/3 is reserved for proofs and tx overhead
+	let max_messages_size_in_single_batch = bp_polkadot::max_extrinsic_size() / 3;
+	// we don't know exact weights of the Polkadot runtime. So to guess weights we'll be using
+	// weights from Rialto and then simply dividing it by x2.
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
+			bp_polkadot::max_extrinsic_weight(),
+			bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+		);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
+
+	log::info!(
+		target: "bridge",
+		"Starting Kusama -> Polkadot messages relay.\n\t\
+			Kusama relayer account id: {:?}\n\t\
+			Max messages in single transaction: {}\n\t\
+			Max messages size in single transaction: {}\n\t\
+			Max messages weight in single transaction: {}\n\t\
+			Tx mortality: {:?}/{:?}\n\t\
+			Stall timeout: {:?}",
+		lane.message_lane.relayer_id_at_source,
+		max_messages_in_single_batch,
+		max_messages_size_in_single_batch,
+		max_messages_weight_in_single_batch,
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		stall_timeout,
+	);
+
+	let (metrics_params, metrics_values) = add_standalone_metrics(
+		Some(messages_relay::message_lane_loop::metrics_prefix::<
+			<KusamaMessagesToPolkadot as SubstrateMessageLane>::MessageLane,
+		>(&lane_id)),
+		params.metrics_params,
+		source_client.clone(),
+	)?;
+	messages_relay::message_lane_loop::run(
+		messages_relay::message_lane_loop::Params {
+			lane: lane_id,
+			source_tick: Kusama::AVERAGE_BLOCK_INTERVAL,
+			target_tick: Polkadot::AVERAGE_BLOCK_INTERVAL,
+			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
+			stall_timeout,
+			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
+				max_unrewarded_relayer_entries_at_target:
+					bp_polkadot::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_polkadot::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_messages_in_single_batch,
+				max_messages_weight_in_single_batch,
+				max_messages_size_in_single_batch,
+				relay_strategy: params.relay_strategy,
+			},
+		},
+		KusamaSourceClient::new(
+			source_client.clone(),
+			lane.clone(),
+			lane_id,
+			params.target_to_source_headers_relay,
+		),
+		PolkadotTargetClient::new(
+			params.target_client,
+			lane,
+			lane_id,
+			metrics_values,
+			params.source_to_target_headers_relay,
+		),
+		metrics_params,
+		futures::future::pending(),
+	)
+	.await
+	.map_err(Into::into)
+}
+
+/// Add standalone metrics for the Kusama -> Polkadot messages loop.
+pub(crate) fn add_standalone_metrics(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<Kusama>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	let polkadot_to_kusama_conversion_rate_key = bp_runtime::storage_parameter_key(
+		bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME,
+	)
+	.0;
+
+	substrate_relay_helper::messages_lane::add_standalone_metrics::<KusamaMessagesToPolkadot>(
+		metrics_prefix,
+		metrics_params,
+		source_client,
+		Some(crate::chains::polkadot::TOKEN_ID),
+		Some(crate::chains::kusama::TOKEN_ID),
+		Some((
+			sp_core::storage::StorageKey(polkadot_to_kusama_conversion_rate_key),
+			// starting relay before this parameter will be set to some value may cause troubles
+			FixedU128::from_inner(FixedU128::DIV),
+		)),
+	)
+}
+
+/// Update Polkadot -> Kusama conversion rate, stored in Kusama runtime storage.
+pub(crate) async fn update_polkadot_to_kusama_conversion_rate(
+	client: Client<Kusama>,
+	signer: <Kusama as TransactionSignScheme>::AccountKeyPair,
+	updated_rate: f64,
+) -> anyhow::Result<()> {
+	let genesis_hash = *client.genesis_hash();
+	let signer_id = (*signer.public().as_array_ref()).into();
+	client
+		.submit_signed_extrinsic(signer_id, move |_, transaction_nonce| {
+			Bytes(
+				Kusama::sign_transaction(
+					genesis_hash,
+					&signer,
+					relay_substrate_client::TransactionEra::immortal(),
+					UnsignedTransaction::new(
+						relay_kusama_client::runtime::Call::BridgePolkadotMessages(
+							relay_kusama_client::runtime::BridgePolkadotMessagesCall::update_pallet_parameter(
+								relay_kusama_client::runtime::BridgePolkadotMessagesParameter::PolkadotToKusamaConversionRate(
+									sp_runtime::FixedU128::from_float(updated_rate),
+								)
+							)
+						),
+						transaction_nonce,
+					),
+				)
+					.encode(),
+			)
+		})
+		.await
+		.map(drop)
+		.map_err(|err| anyhow::format_err!("{:?}", err))
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs
index 3cba16ea32f44160aebf684ab7f82a120bf2cc54..1dbeab9a90498e52e1417320979959e26ef09b3b 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs
@@ -19,8 +19,11 @@
 use crate::cli::{
 	bridge,
 	encode_call::{self, Call, CliEncodeCall},
-	encode_message, send_message, CliChain,
+	encode_message,
+	send_message::{self, DispatchFeePayment},
+	CliChain,
 };
+use anyhow::anyhow;
 use bp_message_dispatch::{CallOrigin, MessagePayload};
 use codec::Decode;
 use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight};
@@ -35,31 +38,32 @@ impl CliEncodeCall for Millau {
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
 			Call::Raw { data } => Decode::decode(&mut &*data.0)?,
-			Call::Remark { remark_payload, .. } => millau_runtime::Call::System(millau_runtime::SystemCall::remark(
-				remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-			)),
-			Call::Transfer { recipient, amount } => millau_runtime::Call::Balances(
-				millau_runtime::BalancesCall::transfer(recipient.raw_id(), amount.cast()),
-			),
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::MILLAU_TO_RIALTO_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					millau_runtime::Call::BridgeRialtoMessages(millau_runtime::MessagesCall::send_message(
-						lane.0,
-						payload,
-						fee.cast(),
-					))
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
-				),
-			},
+			Call::Remark { remark_payload, .. } =>
+				millau_runtime::Call::System(millau_runtime::SystemCall::remark {
+					remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				}),
+			Call::Transfer { recipient, amount } =>
+				millau_runtime::Call::Balances(millau_runtime::BalancesCall::transfer {
+					dest: recipient.raw_id(),
+					value: amount.cast(),
+				}),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::MILLAU_TO_RIALTO_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						millau_runtime::Call::BridgeRialtoMessages(
+							millau_runtime::MessagesCall::send_message {
+								lane_id: lane.0,
+								payload,
+								delivery_and_dispatch_fee: fee.cast(),
+							},
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 		})
 	}
 
@@ -72,7 +76,12 @@ impl CliChain for Millau {
 	const RUNTIME_VERSION: RuntimeVersion = millau_runtime::VERSION;
 
 	type KeyPair = sp_core::sr25519::Pair;
-	type MessagePayload = MessagePayload<bp_millau::AccountId, bp_rialto::AccountSigner, bp_rialto::Signature, Vec<u8>>;
+	type MessagePayload = MessagePayload<
+		bp_millau::AccountId,
+		bp_rialto::AccountSigner,
+		bp_rialto::Signature,
+		Vec<u8>,
+	>;
 
 	fn ss58_format() -> u16 {
 		millau_runtime::SS58Prefix::get() as u16
@@ -83,10 +92,12 @@ impl CliChain for Millau {
 	}
 
 	// TODO [#854|#843] support multiple bridges?
-	fn encode_message(message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
 		match message {
 			encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0)
-				.map_err(|e| format!("Failed to decode Millau's MessagePayload: {:?}", e)),
+				.map_err(|e| anyhow!("Failed to decode Millau's MessagePayload: {:?}", e)),
 			encode_message::MessagePayload::Call { mut call, mut sender } => {
 				type Source = Millau;
 				type Target = relay_rialto_client::Rialto;
@@ -94,12 +105,21 @@ impl CliChain for Millau {
 				sender.enforce_chain::<Source>();
 				let spec_version = Target::RUNTIME_VERSION.spec_version;
 				let origin = CallOrigin::SourceAccount(sender.raw_id());
-				encode_call::preprocess_call::<Source, Target>(&mut call, bridge::MILLAU_TO_RIALTO_INDEX);
-				let call = Target::encode_call(&call).map_err(|e| e.to_string())?;
+				encode_call::preprocess_call::<Source, Target>(
+					&mut call,
+					bridge::MILLAU_TO_RIALTO_INDEX,
+				);
+				let call = Target::encode_call(&call)?;
 				let weight = call.get_dispatch_info().weight;
 
-				Ok(send_message::message_payload(spec_version, weight, origin, &call))
-			}
+				Ok(send_message::message_payload(
+					spec_version,
+					weight,
+					origin,
+					&call,
+					DispatchFeePayment::AtSourceChain,
+				))
+			},
 		}
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs
index 58f0620b0764bf000b903f41476e2697b1d895f4..14a0430f6a9182f8487e9cf1e1177cb18d3db6e2 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs
@@ -16,37 +16,64 @@
 
 //! Millau-to-Rialto headers sync entrypoint.
 
-use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use codec::Encode;
+use sp_core::{Bytes, Pair};
 
 use bp_header_chain::justification::GrandpaJustification;
-use codec::Encode;
 use relay_millau_client::{Millau, SyncHeader as MillauSyncHeader};
 use relay_rialto_client::{Rialto, SigningParams as RialtoSigningParams};
-use relay_substrate_client::{Chain, TransactionSignScheme};
-use sp_core::{Bytes, Pair};
+use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Millau-to-Rialto finality sync pipeline.
-pub(crate) type MillauFinalityToRialto = SubstrateFinalityToSubstrate<Millau, Rialto, RialtoSigningParams>;
+pub(crate) type FinalityPipelineMillauToRialto =
+	SubstrateFinalityToSubstrate<Millau, Rialto, RialtoSigningParams>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct MillauFinalityToRialto {
+	finality_pipeline: FinalityPipelineMillauToRialto,
+}
+
+impl MillauFinalityToRialto {
+	pub fn new(target_client: Client<Rialto>, target_sign: RialtoSigningParams) -> Self {
+		Self { finality_pipeline: FinalityPipelineMillauToRialto::new(target_client, target_sign) }
+	}
+}
 
 impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto {
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
+	type FinalitySyncPipeline = FinalityPipelineMillauToRialto;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
 
 	type TargetChain = Rialto;
 
 	fn transactions_author(&self) -> bp_rialto::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_submit_finality_proof_transaction(
 		&self,
-		transaction_nonce: <Rialto as Chain>::Index,
+		era: bp_runtime::TransactionEraOf<Rialto>,
+		transaction_nonce: IndexOf<Rialto>,
 		header: MillauSyncHeader,
 		proof: GrandpaJustification<bp_millau::Header>,
 	) -> Bytes {
-		let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof(header.into_inner(), proof).into();
+		let call = rialto_runtime::BridgeGrandpaMillauCall::submit_finality_proof {
+			finality_target: Box::new(header.into_inner()),
+			justification: proof,
+		}
+		.into();
 
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Rialto::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 
 		Bytes(transaction.encode())
 	}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs
index 31dc51e9c27bb8d8c78fb39f18a3fffc97dc838a..3661eb78c19f9cc22ee2422d29a68973ec11b5aa 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs
@@ -16,64 +16,100 @@
 
 //! Millau-to-Rialto messages sync entrypoint.
 
-use crate::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
-};
-use crate::messages_source::SubstrateMessagesSource;
-use crate::messages_target::SubstrateMessagesTarget;
+use std::ops::RangeInclusive;
 
-use bp_messages::MessageNonce;
-use bp_runtime::{MILLAU_CHAIN_ID, RIALTO_CHAIN_ID};
-use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use codec::Encode;
 use frame_support::dispatch::GetDispatchInfo;
-use messages_relay::message_lane::MessageLane;
-use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams};
-use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams};
-use relay_substrate_client::{
-	metrics::{FloatStorageValueMetric, StorageProofOverheadMetric},
-	Chain, TransactionSignScheme,
-};
 use sp_core::{Bytes, Pair};
-use std::{ops::RangeInclusive, time::Duration};
+
+use bp_messages::MessageNonce;
+use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use frame_support::weights::Weight;
+use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy};
+use relay_millau_client::{
+	HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams,
+};
+use relay_rialto_client::{
+	HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams,
+};
+use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
+	STALL_TIMEOUT,
+};
 
 /// Millau-to-Rialto message lane.
-pub type MillauMessagesToRialto =
+pub type MessageLaneMillauMessagesToRialto =
 	SubstrateMessageLaneToSubstrate<Millau, MillauSigningParams, Rialto, RialtoSigningParams>;
 
+#[derive(Clone)]
+pub struct MillauMessagesToRialto {
+	message_lane: MessageLaneMillauMessagesToRialto,
+}
+
 impl SubstrateMessageLane for MillauMessagesToRialto {
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD;
+	type MessageLane = MessageLaneMillauMessagesToRialto;
+
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_rialto::TO_RIALTO_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rialto::TO_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_millau::FROM_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_millau::FROM_MILLAU_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_millau::FROM_MILLAU_UNREWARDED_RELAYERS_STATE;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME;
+
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Millau;
 	type TargetChain = Rialto;
 
 	fn source_transactions_author(&self) -> bp_millau::AccountId {
-		(*self.source_sign.public().as_array_ref()).into()
+		(*self.message_lane.source_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_receiving_proof_transaction(
 		&self,
-		transaction_nonce: <Millau as Chain>::Index,
+		best_block_id: MillauHeaderId,
+		transaction_nonce: IndexOf<Millau>,
 		_generated_at_block: RialtoHeaderId,
-		proof: <Self as MessageLane>::MessagesReceivingProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
 	) -> Bytes {
 		let (relayers_state, proof) = proof;
 		let call: millau_runtime::Call =
-			millau_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into();
+			millau_runtime::MessagesCall::receive_messages_delivery_proof { proof, relayers_state }
+				.into();
 		let call_weight = call.get_dispatch_info().weight;
-		let genesis_hash = *self.source_client.genesis_hash();
-		let transaction = Millau::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.source_client.genesis_hash();
+		let transaction = Millau::sign_transaction(
+			genesis_hash,
+			&self.message_lane.source_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.source_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Rialto -> Millau confirmation transaction. Weight: {}/{}, size: {}/{}",
@@ -86,33 +122,38 @@ impl SubstrateMessageLane for MillauMessagesToRialto {
 	}
 
 	fn target_transactions_author(&self) -> bp_rialto::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.message_lane.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_delivery_transaction(
 		&self,
-		transaction_nonce: <Rialto as Chain>::Index,
+		best_block_id: RialtoHeaderId,
+		transaction_nonce: IndexOf<Rialto>,
 		_generated_at_header: MillauHeaderId,
 		_nonces: RangeInclusive<MessageNonce>,
-		proof: <Self as MessageLane>::MessagesProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
-		let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof(
-			self.relayer_id_at_source.clone(),
+		let call: rialto_runtime::Call = rialto_runtime::MessagesCall::receive_messages_proof {
+			relayer_id_at_bridged_chain: self.message_lane.relayer_id_at_source.clone(),
 			proof,
-			messages_count as _,
+			messages_count: messages_count as _,
 			dispatch_weight,
-		)
+		}
 		.into();
 		let call_weight = call.get_dispatch_info().weight;
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Rialto::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.target_client.genesis_hash();
+		let transaction = Rialto::sign_transaction(
+			genesis_hash,
+			&self.message_lane.target_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.target_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Millau -> Rialto delivery transaction. Weight: {}/{}, size: {}/{}",
@@ -126,35 +167,51 @@ impl SubstrateMessageLane for MillauMessagesToRialto {
 }
 
 /// Millau node as messages source.
-type MillauSourceClient =
-	SubstrateMessagesSource<Millau, MillauMessagesToRialto, millau_runtime::WithRialtoMessagesInstance>;
+type MillauSourceClient = SubstrateMessagesSource<MillauMessagesToRialto>;
 
 /// Rialto node as messages target.
-type RialtoTargetClient =
-	SubstrateMessagesTarget<Rialto, MillauMessagesToRialto, rialto_runtime::WithMillauMessagesInstance>;
+type RialtoTargetClient = SubstrateMessagesTarget<MillauMessagesToRialto>;
 
 /// Run Millau-to-Rialto messages sync.
 pub async fn run(
-	params: MessagesRelayParams<Millau, MillauSigningParams, Rialto, RialtoSigningParams>,
-) -> Result<(), String> {
-	let stall_timeout = Duration::from_secs(5 * 60);
+	params: MessagesRelayParams<
+		Millau,
+		MillauSigningParams,
+		Rialto,
+		RialtoSigningParams,
+		MixStrategy,
+	>,
+) -> anyhow::Result<()> {
+	let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout(
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		Millau::AVERAGE_BLOCK_INTERVAL,
+		Rialto::AVERAGE_BLOCK_INTERVAL,
+		STALL_TIMEOUT,
+	);
 	let relayer_id_at_millau = (*params.source_sign.public().as_array_ref()).into();
 
 	let lane_id = params.lane_id;
 	let source_client = params.source_client;
 	let lane = MillauMessagesToRialto {
-		source_client: source_client.clone(),
-		source_sign: params.source_sign,
-		target_client: params.target_client.clone(),
-		target_sign: params.target_sign,
-		relayer_id_at_source: relayer_id_at_millau,
+		message_lane: SubstrateMessageLaneToSubstrate {
+			source_client: source_client.clone(),
+			source_sign: params.source_sign,
+			source_transactions_mortality: params.source_transactions_mortality,
+			target_client: params.target_client.clone(),
+			target_sign: params.target_sign,
+			target_transactions_mortality: params.target_transactions_mortality,
+			relayer_id_at_source: relayer_id_at_millau,
+		},
 	};
 
 	// 2/3 is reserved for proofs and tx overhead
 	let max_messages_size_in_single_batch = bp_rialto::max_extrinsic_size() / 3;
 	// TODO: use Millau weights after https://github.com/paritytech/parity-bridges-common/issues/390
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<millau_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<millau_runtime::Runtime>,
+		>(
 			bp_rialto::max_extrinsic_weight(),
 			bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
@@ -165,13 +222,25 @@ pub async fn run(
 			Millau relayer account id: {:?}\n\t\
 			Max messages in single transaction: {}\n\t\
 			Max messages size in single transaction: {}\n\t\
-			Max messages weight in single transaction: {}",
-		lane.relayer_id_at_source,
+			Max messages weight in single transaction: {}\n\t\
+			Tx mortality: {:?}/{:?}\n\t\
+			Stall timeout: {:?}",
+		lane.message_lane.relayer_id_at_source,
 		max_messages_in_single_batch,
 		max_messages_size_in_single_batch,
 		max_messages_weight_in_single_batch,
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		stall_timeout,
 	);
 
+	let (metrics_params, metrics_values) = add_standalone_metrics(
+		Some(messages_relay::message_lane_loop::metrics_prefix::<
+			<MillauMessagesToRialto as SubstrateMessageLane>::MessageLane,
+		>(&lane_id)),
+		params.metrics_params,
+		source_client.clone(),
+	)?;
 	messages_relay::message_lane_loop::run(
 		messages_relay::message_lane_loop::Params {
 			lane: lane_id,
@@ -180,58 +249,86 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_rialto::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_rialto::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
-				relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic,
+				relay_strategy: params.relay_strategy,
 			},
 		},
 		MillauSourceClient::new(
 			source_client.clone(),
 			lane.clone(),
 			lane_id,
-			RIALTO_CHAIN_ID,
 			params.target_to_source_headers_relay,
 		),
 		RialtoTargetClient::new(
 			params.target_client,
 			lane,
 			lane_id,
-			MILLAU_CHAIN_ID,
+			metrics_values,
 			params.source_to_target_headers_relay,
 		),
-		relay_utils::relay_metrics(
-			Some(messages_relay::message_lane_loop::metrics_prefix::<
-				MillauMessagesToRialto,
-			>(&lane_id)),
-			params.metrics_params,
-		)
-		.standalone_metric(|registry, prefix| {
-			StorageProofOverheadMetric::new(
-				registry,
-				prefix,
-				source_client.clone(),
-				"millau_storage_proof_overhead".into(),
-				"Millau storage proof overhead".into(),
-			)
-		})?
-		.standalone_metric(|registry, prefix| {
-			FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
-				registry,
-				prefix,
-				source_client,
-				sp_core::storage::StorageKey(
-					millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(),
-				),
-				Some(millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE),
-				"millau_rialto_to_millau_conversion_rate".into(),
-				"Rialto to Millau tokens conversion rate (used by Rialto)".into(),
-			)
-		})?
-		.into_params(),
+		metrics_params,
 		futures::future::pending(),
 	)
 	.await
+	.map_err(Into::into)
+}
+
+/// Add standalone metrics for the Millau -> Rialto messages loop.
+pub(crate) fn add_standalone_metrics(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<Millau>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	substrate_relay_helper::messages_lane::add_standalone_metrics::<MillauMessagesToRialto>(
+		metrics_prefix,
+		metrics_params,
+		source_client,
+		Some(crate::chains::MILLAU_ASSOCIATED_TOKEN_ID),
+		Some(crate::chains::RIALTO_ASSOCIATED_TOKEN_ID),
+		Some((
+			sp_core::storage::StorageKey(
+				millau_runtime::rialto_messages::RialtoToMillauConversionRate::key().to_vec(),
+			),
+			millau_runtime::rialto_messages::INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE,
+		)),
+	)
+}
+
+/// Update Rialto -> Millau conversion rate, stored in Millau runtime storage.
+pub(crate) async fn update_rialto_to_millau_conversion_rate(
+	client: Client<Millau>,
+	signer: <Millau as TransactionSignScheme>::AccountKeyPair,
+	updated_rate: f64,
+) -> anyhow::Result<()> {
+	let genesis_hash = *client.genesis_hash();
+	let signer_id = (*signer.public().as_array_ref()).into();
+	client
+		.submit_signed_extrinsic(signer_id, move |_, transaction_nonce| {
+			Bytes(
+				Millau::sign_transaction(
+					genesis_hash,
+					&signer,
+					relay_substrate_client::TransactionEra::immortal(),
+					UnsignedTransaction::new(
+						millau_runtime::MessagesCall::update_pallet_parameter {
+							parameter: millau_runtime::rialto_messages::MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(
+								sp_runtime::FixedU128::from_float(updated_rate),
+							),
+						}
+						.into(),
+						transaction_nonce,
+					),
+				)
+				.encode(),
+			)
+		})
+		.await
+		.map(drop)
+		.map_err(|err| anyhow::format_err!("{:?}", err))
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs
index 09d3c3e9c060a9f114a605e1f3ad5a3c353ffa01..a96d46d9ecc8d81f0bccd72e1994c01d2824f116 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs
@@ -16,8 +16,12 @@
 
 //! Chain-specific relayer configuration.
 
+pub mod kusama_headers_to_polkadot;
+pub mod kusama_messages_to_polkadot;
 pub mod millau_headers_to_rialto;
 pub mod millau_messages_to_rialto;
+pub mod polkadot_headers_to_kusama;
+pub mod polkadot_messages_to_kusama;
 pub mod rialto_headers_to_millau;
 pub mod rialto_messages_to_millau;
 pub mod rococo_headers_to_wococo;
@@ -26,45 +30,40 @@ pub mod westend_headers_to_millau;
 pub mod wococo_headers_to_rococo;
 pub mod wococo_messages_to_rococo;
 
+mod kusama;
 mod millau;
+mod polkadot;
 mod rialto;
+mod rialto_parachain;
 mod rococo;
 mod westend;
 mod wococo;
 
-use relay_utils::metrics::{FloatJsonValueMetric, MetricsParams};
+// Millau/Rialto tokens have no real value, so the conversion rate we use is always 1:1. But we
+// want to test our code that is intended to work with real-value chains. So to keep it close to
+// 1:1, we'll be treating Rialto as BTC and Millau as wBTC (only in relayer).
+
+/// The identifier of the token whose value is associated with the Rialto token value by the relayer.
+pub(crate) const RIALTO_ASSOCIATED_TOKEN_ID: &str = polkadot::TOKEN_ID;
+/// The identifier of the token whose value is associated with the Millau token value by the relayer.
+pub(crate) const MILLAU_ASSOCIATED_TOKEN_ID: &str = kusama::TOKEN_ID;
+
+use relay_utils::metrics::MetricsParams;
 
 pub(crate) fn add_polkadot_kusama_price_metrics<T: finality_relay::FinalitySyncPipeline>(
+	prefix: Option<String>,
 	params: MetricsParams,
 ) -> anyhow::Result<MetricsParams> {
-	Ok(
-		relay_utils::relay_metrics(Some(finality_relay::metrics_prefix::<T>()), params)
-			// Polkadot/Kusama prices are added as metrics here, because atm we don't have Polkadot <-> Kusama
-			// relays, but we want to test metrics/dashboards in advance
-			.standalone_metric(|registry, prefix| {
-				FloatJsonValueMetric::new(
-					registry,
-					prefix,
-					"https://api.coingecko.com/api/v3/simple/price?ids=Polkadot&vs_currencies=btc".into(),
-					"$.polkadot.btc".into(),
-					"polkadot_to_base_conversion_rate".into(),
-					"Rate used to convert from DOT to some BASE tokens".into(),
-				)
-			})
-			.map_err(|e| anyhow::format_err!("{}", e))?
-			.standalone_metric(|registry, prefix| {
-				FloatJsonValueMetric::new(
-					registry,
-					prefix,
-					"https://api.coingecko.com/api/v3/simple/price?ids=Kusama&vs_currencies=btc".into(),
-					"$.kusama.btc".into(),
-					"kusama_to_base_conversion_rate".into(),
-					"Rate used to convert from KSM to some BASE tokens".into(),
-				)
-			})
-			.map_err(|e| anyhow::format_err!("{}", e))?
-			.into_params(),
-	)
+	// Polkadot/Kusama prices are added as metrics here because, at the moment, we don't have
+	// Polkadot <-> Kusama relays, but we want to test metrics/dashboards in advance.
+	Ok(relay_utils::relay_metrics(prefix, params)
+		.standalone_metric(|registry, prefix| {
+			substrate_relay_helper::helpers::token_price_metric(registry, prefix, "polkadot")
+		})?
+		.standalone_metric(|registry, prefix| {
+			substrate_relay_helper::helpers::token_price_metric(registry, prefix, "kusama")
+		})?
+		.into_params())
 }
 
 #[cfg(test)]
@@ -75,7 +74,7 @@ mod tests {
 	use frame_support::dispatch::GetDispatchInfo;
 	use relay_millau_client::Millau;
 	use relay_rialto_client::Rialto;
-	use relay_substrate_client::TransactionSignScheme;
+	use relay_substrate_client::{TransactionSignScheme, UnsignedTransaction};
 	use sp_core::Pair;
 	use sp_runtime::traits::{IdentifyAccount, Verify};
 
@@ -83,7 +82,8 @@ mod tests {
 	fn millau_signature_is_valid_on_rialto() {
 		let millau_sign = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap();
 
-		let call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![]));
+		let call =
+			rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { remark: vec![] });
 
 		let millau_public: bp_millau::AccountSigner = millau_sign.public().into();
 		let millau_account_id: bp_millau::AccountId = millau_public.into_account();
@@ -94,7 +94,8 @@ mod tests {
 			rialto_runtime::VERSION.spec_version,
 		);
 
-		let rialto_signer = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap();
+		let rialto_signer =
+			relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap();
 		let signature = rialto_signer.sign(&digest);
 
 		assert!(signature.verify(&digest[..], &rialto_signer.public()));
@@ -104,7 +105,8 @@ mod tests {
 	fn rialto_signature_is_valid_on_millau() {
 		let rialto_sign = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap();
 
-		let call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![]));
+		let call =
+			millau_runtime::Call::System(millau_runtime::SystemCall::remark { remark: vec![] });
 
 		let rialto_public: bp_rialto::AccountSigner = rialto_sign.public().into();
 		let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account();
@@ -115,7 +117,8 @@ mod tests {
 			millau_runtime::VERSION.spec_version,
 		);
 
-		let millau_signer = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap();
+		let millau_signer =
+			relay_millau_client::SigningParams::from_string("//Dave", None).unwrap();
 		let signature = millau_signer.sign(&digest);
 
 		assert!(signature.verify(&digest[..], &millau_signer.public()));
@@ -130,22 +133,27 @@ mod tests {
 			bp_millau::max_extrinsic_size(),
 		);
 
-		let call: millau_runtime::Call = millau_runtime::SystemCall::remark(vec![42; maximal_remark_size as _]).into();
+		let call: millau_runtime::Call =
+			millau_runtime::SystemCall::remark { remark: vec![42; maximal_remark_size as _] }
+				.into();
 		let payload = send_message::message_payload(
 			Default::default(),
 			call.get_dispatch_info().weight,
 			bp_message_dispatch::CallOrigin::SourceRoot,
 			&call,
+			send_message::DispatchFeePayment::AtSourceChain,
 		);
 		assert_eq!(Millau::verify_message(&payload), Ok(()));
 
 		let call: millau_runtime::Call =
-			millau_runtime::SystemCall::remark(vec![42; (maximal_remark_size + 1) as _]).into();
+			millau_runtime::SystemCall::remark { remark: vec![42; (maximal_remark_size + 1) as _] }
+				.into();
 		let payload = send_message::message_payload(
 			Default::default(),
 			call.get_dispatch_info().weight,
 			bp_message_dispatch::CallOrigin::SourceRoot,
 			&call,
+			send_message::DispatchFeePayment::AtSourceChain,
 		);
 		assert!(Millau::verify_message(&payload).is_err());
 	}
@@ -164,15 +172,18 @@ mod tests {
 	fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() {
 		use rialto_runtime::millau_messages::Millau;
 
-		let maximal_dispatch_weight =
-			send_message::compute_maximal_message_dispatch_weight(bp_millau::max_extrinsic_weight());
-		let call: millau_runtime::Call = rialto_runtime::SystemCall::remark(vec![]).into();
+		let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight(
+			bp_millau::max_extrinsic_weight(),
+		);
+		let call: millau_runtime::Call =
+			rialto_runtime::SystemCall::remark { remark: vec![] }.into();
 
 		let payload = send_message::message_payload(
 			Default::default(),
 			maximal_dispatch_weight,
 			bp_message_dispatch::CallOrigin::SourceRoot,
 			&call,
+			send_message::DispatchFeePayment::AtSourceChain,
 		);
 		assert_eq!(Millau::verify_message(&payload), Ok(()));
 
@@ -181,6 +192,7 @@ mod tests {
 			maximal_dispatch_weight + 1,
 			bp_message_dispatch::CallOrigin::SourceRoot,
 			&call,
+			send_message::DispatchFeePayment::AtSourceChain,
 		);
 		assert!(Millau::verify_message(&payload).is_err());
 	}
@@ -189,15 +201,18 @@ mod tests {
 	fn maximal_weight_fill_block_to_rialto_is_generated_correctly() {
 		use millau_runtime::rialto_messages::Rialto;
 
-		let maximal_dispatch_weight =
-			send_message::compute_maximal_message_dispatch_weight(bp_rialto::max_extrinsic_weight());
-		let call: rialto_runtime::Call = millau_runtime::SystemCall::remark(vec![]).into();
+		let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight(
+			bp_rialto::max_extrinsic_weight(),
+		);
+		let call: rialto_runtime::Call =
+			millau_runtime::SystemCall::remark { remark: vec![] }.into();
 
 		let payload = send_message::message_payload(
 			Default::default(),
 			maximal_dispatch_weight,
 			bp_message_dispatch::CallOrigin::SourceRoot,
 			&call,
+			send_message::DispatchFeePayment::AtSourceChain,
 		);
 		assert_eq!(Rialto::verify_message(&payload), Ok(()));
 
@@ -206,18 +221,20 @@ mod tests {
 			maximal_dispatch_weight + 1,
 			bp_message_dispatch::CallOrigin::SourceRoot,
 			&call,
+			send_message::DispatchFeePayment::AtSourceChain,
 		);
 		assert!(Rialto::verify_message(&payload).is_err());
 	}
 
 	#[test]
 	fn rialto_tx_extra_bytes_constant_is_correct() {
-		let rialto_call = rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(vec![]));
+		let rialto_call =
+			rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { remark: vec![] });
 		let rialto_tx = Rialto::sign_transaction(
 			Default::default(),
 			&sp_keyring::AccountKeyring::Alice.pair(),
-			0,
-			rialto_call.clone(),
+			relay_substrate_client::TransactionEra::immortal(),
+			UnsignedTransaction::new(rialto_call.clone(), 0),
 		);
 		let extra_bytes_in_transaction = rialto_tx.encode().len() - rialto_call.encode().len();
 		assert!(
@@ -230,12 +247,13 @@ mod tests {
 
 	#[test]
 	fn millau_tx_extra_bytes_constant_is_correct() {
-		let millau_call = millau_runtime::Call::System(millau_runtime::SystemCall::remark(vec![]));
+		let millau_call =
+			millau_runtime::Call::System(millau_runtime::SystemCall::remark { remark: vec![] });
 		let millau_tx = Millau::sign_transaction(
 			Default::default(),
 			&sp_keyring::AccountKeyring::Alice.pair(),
-			0,
-			millau_call.clone(),
+			relay_substrate_client::TransactionEra::immortal(),
+			UnsignedTransaction::new(millau_call.clone(), 0),
 		);
 		let extra_bytes_in_transaction = millau_tx.encode().len() - millau_call.encode().len();
 		assert!(
@@ -274,13 +292,14 @@ mod rococo_tests {
 		};
 
 		let actual = relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof(
-			header.clone(),
+			Box::new(header.clone()),
 			justification.clone(),
 		);
-		let expected = millau_runtime::BridgeGrandpaRialtoCall::<millau_runtime::Runtime>::submit_finality_proof(
-			header,
-			justification,
-		);
+		let expected =
+			millau_runtime::BridgeGrandpaCall::<millau_runtime::Runtime>::submit_finality_proof {
+				finality_target: Box::new(header),
+				justification,
+			};
 
 		// when
 		let actual_encoded = actual.encode();
@@ -321,11 +340,15 @@ mod westend_tests {
 			votes_ancestries: vec![],
 		};
 
-		let actual = bp_westend::BridgeGrandpaRococoCall::submit_finality_proof(header.clone(), justification.clone());
-		let expected = millau_runtime::BridgeGrandpaRialtoCall::<millau_runtime::Runtime>::submit_finality_proof(
-			header,
-			justification,
+		let actual = relay_kusama_client::runtime::BridgePolkadotGrandpaCall::submit_finality_proof(
+			Box::new(header.clone()),
+			justification.clone(),
 		);
+		let expected =
+			millau_runtime::BridgeGrandpaCall::<millau_runtime::Runtime>::submit_finality_proof {
+				finality_target: Box::new(header),
+				justification,
+			};
 
 		// when
 		let actual_encoded = actual.encode();
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs
new file mode 100644
index 0000000000000000000000000000000000000000..55d17e46f13bb7bb59873b9036eac4420921ea83
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs
@@ -0,0 +1,103 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use codec::Decode;
+use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight};
+use relay_polkadot_client::Polkadot;
+use sp_version::RuntimeVersion;
+
+use crate::cli::{
+	bridge,
+	encode_call::{Call, CliEncodeCall},
+	encode_message, CliChain,
+};
+
+/// Weight of the `system::remark` call at Polkadot.
+///
+/// This weight is larger (2x) than the actual weight at the current Polkadot runtime, to avoid
+/// unsuccessful calls in the future. But since it is used only in tests (and on test chains), this is ok.
+pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000;
+
+/// Id of Polkadot token that is used to fetch token price.
+pub(crate) const TOKEN_ID: &str = "polkadot";
+
+impl CliEncodeCall for Polkadot {
+	fn max_extrinsic_size() -> u32 {
+		bp_polkadot::max_extrinsic_size()
+	}
+
+	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
+		Ok(match call {
+			Call::Remark { remark_payload, .. } => relay_polkadot_client::runtime::Call::System(
+				relay_polkadot_client::runtime::SystemCall::remark(
+					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				),
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::POLKADOT_TO_KUSAMA_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
+							relay_polkadot_client::runtime::BridgeKusamaMessagesCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
+			_ => anyhow::bail!("Unsupported Polkadot call: {:?}", call),
+		})
+	}
+
+	fn get_dispatch_info(
+		call: &relay_polkadot_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
+		match *call {
+			relay_polkadot_client::runtime::Call::System(
+				relay_polkadot_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: crate::chains::polkadot::SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
+			_ => anyhow::bail!("Unsupported Polkadot call: {:?}", call),
+		}
+	}
+}
+
+impl CliChain for Polkadot {
+	const RUNTIME_VERSION: RuntimeVersion = bp_polkadot::VERSION;
+
+	type KeyPair = sp_core::sr25519::Pair;
+	type MessagePayload = ();
+
+	fn ss58_format() -> u16 {
+		42
+	}
+
+	fn max_extrinsic_weight() -> Weight {
+		bp_polkadot::max_extrinsic_weight()
+	}
+
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
+		anyhow::bail!("Sending messages from Polkadot is not yet supported.")
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs
new file mode 100644
index 0000000000000000000000000000000000000000..603d5ba3aa779d9b2b1988ab4622b1e38ff47a5a
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs
@@ -0,0 +1,134 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Polkadot-to-Kusama headers sync entrypoint.
+
+use codec::Encode;
+use sp_core::{Bytes, Pair};
+
+use bp_header_chain::justification::GrandpaJustification;
+use relay_kusama_client::{Kusama, SigningParams as KusamaSigningParams};
+use relay_polkadot_client::{Polkadot, SyncHeader as PolkadotSyncHeader};
+use relay_substrate_client::{Client, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
+
+/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat
+/// the relay as gone wild.
+///
+/// The actual value, returned by the `maximal_balance_decrease_per_day_is_sane` test, is approximately 0.001
+/// KSM, but let's round up to 0.1 KSM here.
+pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 100_000_000_000;
+
+/// Polkadot-to-Kusama finality sync pipeline.
+pub(crate) type FinalityPipelinePolkadotFinalityToKusama =
+	SubstrateFinalityToSubstrate<Polkadot, Kusama, KusamaSigningParams>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct PolkadotFinalityToKusama {
+	finality_pipeline: FinalityPipelinePolkadotFinalityToKusama,
+}
+
+impl PolkadotFinalityToKusama {
+	pub fn new(target_client: Client<Kusama>, target_sign: KusamaSigningParams) -> Self {
+		Self {
+			finality_pipeline: FinalityPipelinePolkadotFinalityToKusama::new(
+				target_client,
+				target_sign,
+			),
+		}
+	}
+}
+
+impl SubstrateFinalitySyncPipeline for PolkadotFinalityToKusama {
+	type FinalitySyncPipeline = FinalityPipelinePolkadotFinalityToKusama;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
+
+	type TargetChain = Kusama;
+
+	fn customize_metrics(params: MetricsParams) -> anyhow::Result<MetricsParams> {
+		crate::chains::add_polkadot_kusama_price_metrics::<Self::FinalitySyncPipeline>(
+			Some(finality_relay::metrics_prefix::<Self::FinalitySyncPipeline>()),
+			params,
+		)
+	}
+
+	fn start_relay_guards(&self) {
+		relay_substrate_client::guard::abort_on_spec_version_change(
+			self.finality_pipeline.target_client.clone(),
+			bp_kusama::VERSION.spec_version,
+		);
+		relay_substrate_client::guard::abort_when_account_balance_decreased(
+			self.finality_pipeline.target_client.clone(),
+			self.transactions_author(),
+			MAXIMAL_BALANCE_DECREASE_PER_DAY,
+		);
+	}
+
+	fn transactions_author(&self) -> bp_kusama::AccountId {
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
+	}
+
+	fn make_submit_finality_proof_transaction(
+		&self,
+		era: bp_runtime::TransactionEraOf<Kusama>,
+		transaction_nonce: bp_runtime::IndexOf<Kusama>,
+		header: PolkadotSyncHeader,
+		proof: GrandpaJustification<bp_polkadot::Header>,
+	) -> Bytes {
+		let call = relay_kusama_client::runtime::Call::BridgePolkadotGrandpa(
+			relay_kusama_client::runtime::BridgePolkadotGrandpaCall::submit_finality_proof(
+				Box::new(header.into_inner()),
+				proof,
+			),
+		);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Kusama::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
+
+		Bytes(transaction.encode())
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use crate::chains::kusama_headers_to_polkadot::tests::compute_maximal_balance_decrease_per_day;
+
+	#[test]
+	fn maximal_balance_decrease_per_day_is_sane() {
+		// we expect Polkadot -> Kusama relay to be running in mandatory-headers-only mode
+		// => we expect single header for every Polkadot session
+		let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::<
+			bp_kusama::Balance,
+			bp_kusama::WeightToFee,
+		>(bp_polkadot::DAYS / bp_polkadot::SESSION_LENGTH + 1);
+		assert!(
+			MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease,
+			"Maximal expected loss per day {} is larger than hardcoded {}",
+			maximal_balance_decrease,
+			MAXIMAL_BALANCE_DECREASE_PER_DAY,
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs
new file mode 100644
index 0000000000000000000000000000000000000000..b1595665fb25550d0a021c1e5927fa6fcf57196f
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs
@@ -0,0 +1,344 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Polkadot-to-Kusama messages sync entrypoint.
+
+use std::ops::RangeInclusive;
+
+use codec::Encode;
+use sp_core::{Bytes, Pair};
+
+use bp_messages::MessageNonce;
+use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use frame_support::weights::Weight;
+use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy};
+use relay_kusama_client::{
+	HeaderId as KusamaHeaderId, Kusama, SigningParams as KusamaSigningParams,
+};
+use relay_polkadot_client::{
+	HeaderId as PolkadotHeaderId, Polkadot, SigningParams as PolkadotSigningParams,
+};
+use relay_substrate_client::{Chain, Client, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use sp_runtime::{FixedPointNumber, FixedU128};
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
+	STALL_TIMEOUT,
+};
+
+/// Polkadot-to-Kusama message lane.
+pub type MessageLanePolkadotMessagesToKusama =
+	SubstrateMessageLaneToSubstrate<Polkadot, PolkadotSigningParams, Kusama, KusamaSigningParams>;
+
+#[derive(Clone)]
+pub struct PolkadotMessagesToKusama {
+	message_lane: MessageLanePolkadotMessagesToKusama,
+}
+
+impl SubstrateMessageLane for PolkadotMessagesToKusama {
+	type MessageLane = MessageLanePolkadotMessagesToKusama;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_kusama::TO_KUSAMA_MESSAGE_DETAILS_METHOD;
+	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
+		bp_kusama::TO_KUSAMA_LATEST_GENERATED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_kusama::TO_KUSAMA_LATEST_RECEIVED_NONCE_METHOD;
+
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_polkadot::FROM_POLKADOT_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
+		bp_polkadot::FROM_POLKADOT_LATEST_CONFIRMED_NONCE_METHOD;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_polkadot::FROM_POLKADOT_UNREWARDED_RELAYERS_STATE;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD;
+
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str =
+		bp_polkadot::WITH_KUSAMA_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str =
+		bp_kusama::WITH_POLKADOT_MESSAGES_PALLET_NAME;
+
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_kusama::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
+
+	type SourceChain = Polkadot;
+	type TargetChain = Kusama;
+
+	fn source_transactions_author(&self) -> bp_polkadot::AccountId {
+		(*self.message_lane.source_sign.public().as_array_ref()).into()
+	}
+
+	fn make_messages_receiving_proof_transaction(
+		&self,
+		best_block_id: PolkadotHeaderId,
+		transaction_nonce: bp_runtime::IndexOf<Polkadot>,
+		_generated_at_block: KusamaHeaderId,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
+	) -> Bytes {
+		let (relayers_state, proof) = proof;
+		let call = relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
+			relay_polkadot_client::runtime::BridgeKusamaMessagesCall::receive_messages_delivery_proof(
+				proof,
+				relayers_state,
+			),
+		);
+		let genesis_hash = *self.message_lane.source_client.genesis_hash();
+		let transaction = Polkadot::sign_transaction(
+			genesis_hash,
+			&self.message_lane.source_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.source_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
+		log::trace!(
+			target: "bridge",
+			"Prepared Kusama -> Polkadot confirmation transaction. Weight: <unknown>/{}, size: {}/{}",
+			bp_polkadot::max_extrinsic_weight(),
+			transaction.encode().len(),
+			bp_polkadot::max_extrinsic_size(),
+		);
+		Bytes(transaction.encode())
+	}
+
+	fn target_transactions_author(&self) -> bp_kusama::AccountId {
+		(*self.message_lane.target_sign.public().as_array_ref()).into()
+	}
+
+	fn make_messages_delivery_transaction(
+		&self,
+		best_block_id: KusamaHeaderId,
+		transaction_nonce: bp_runtime::IndexOf<Kusama>,
+		_generated_at_header: PolkadotHeaderId,
+		_nonces: RangeInclusive<MessageNonce>,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
+	) -> Bytes {
+		let (dispatch_weight, proof) = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
+		let messages_count = nonces_end - nonces_start + 1;
+
+		let call = relay_kusama_client::runtime::Call::BridgePolkadotMessages(
+			relay_kusama_client::runtime::BridgePolkadotMessagesCall::receive_messages_proof(
+				self.message_lane.relayer_id_at_source.clone(),
+				proof,
+				messages_count as _,
+				dispatch_weight,
+			),
+		);
+		let genesis_hash = *self.message_lane.target_client.genesis_hash();
+		let transaction = Kusama::sign_transaction(
+			genesis_hash,
+			&self.message_lane.target_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.target_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
+		log::trace!(
+			target: "bridge",
+			"Prepared Polkadot -> Kusama delivery transaction. Weight: <unknown>/{}, size: {}/{}",
+			bp_kusama::max_extrinsic_weight(),
+			transaction.encode().len(),
+			bp_kusama::max_extrinsic_size(),
+		);
+		Bytes(transaction.encode())
+	}
+}
+
+/// Polkadot node as messages source.
+type PolkadotSourceClient = SubstrateMessagesSource<PolkadotMessagesToKusama>;
+
+/// Kusama node as messages target.
+type KusamaTargetClient = SubstrateMessagesTarget<PolkadotMessagesToKusama>;
+
+/// Run Polkadot-to-Kusama messages sync.
+pub async fn run(
+	params: MessagesRelayParams<
+		Polkadot,
+		PolkadotSigningParams,
+		Kusama,
+		KusamaSigningParams,
+		MixStrategy,
+	>,
+) -> anyhow::Result<()> {
+	let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout(
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		Polkadot::AVERAGE_BLOCK_INTERVAL,
+		Kusama::AVERAGE_BLOCK_INTERVAL,
+		STALL_TIMEOUT,
+	);
+	let relayer_id_at_polkadot = (*params.source_sign.public().as_array_ref()).into();
+
+	let lane_id = params.lane_id;
+	let source_client = params.source_client;
+	let lane = PolkadotMessagesToKusama {
+		message_lane: SubstrateMessageLaneToSubstrate {
+			source_client: source_client.clone(),
+			source_sign: params.source_sign,
+			source_transactions_mortality: params.source_transactions_mortality,
+			target_client: params.target_client.clone(),
+			target_sign: params.target_sign,
+			target_transactions_mortality: params.target_transactions_mortality,
+			relayer_id_at_source: relayer_id_at_polkadot,
+		},
+	};
+
+	// 2/3 is reserved for proofs and tx overhead
+	let max_messages_size_in_single_batch = bp_kusama::max_extrinsic_size() / 3;
+	// we don't know exact weights of the Kusama runtime. So to guess weights we'll be using
+	// weights from Rialto and then simply dividing them by 2.
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
+			bp_kusama::max_extrinsic_weight(),
+			bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+		);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
+
+	log::info!(
+		target: "bridge",
+		"Starting Polkadot -> Kusama messages relay.\n\t\
+			Polkadot relayer account id: {:?}\n\t\
+			Max messages in single transaction: {}\n\t\
+			Max messages size in single transaction: {}\n\t\
+			Max messages weight in single transaction: {}\n\t\
+			Tx mortality: {:?}/{:?}\n\t\
+			Stall timeout: {:?}",
+		lane.message_lane.relayer_id_at_source,
+		max_messages_in_single_batch,
+		max_messages_size_in_single_batch,
+		max_messages_weight_in_single_batch,
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		stall_timeout,
+	);
+
+	let (metrics_params, metrics_values) = add_standalone_metrics(
+		Some(messages_relay::message_lane_loop::metrics_prefix::<
+			<PolkadotMessagesToKusama as SubstrateMessageLane>::MessageLane,
+		>(&lane_id)),
+		params.metrics_params,
+		source_client.clone(),
+	)?;
+	messages_relay::message_lane_loop::run(
+		messages_relay::message_lane_loop::Params {
+			lane: lane_id,
+			source_tick: Polkadot::AVERAGE_BLOCK_INTERVAL,
+			target_tick: Kusama::AVERAGE_BLOCK_INTERVAL,
+			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
+			stall_timeout,
+			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
+				max_unrewarded_relayer_entries_at_target:
+					bp_kusama::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_kusama::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_messages_in_single_batch,
+				max_messages_weight_in_single_batch,
+				max_messages_size_in_single_batch,
+				relay_strategy: params.relay_strategy,
+			},
+		},
+		PolkadotSourceClient::new(
+			source_client.clone(),
+			lane.clone(),
+			lane_id,
+			params.target_to_source_headers_relay,
+		),
+		KusamaTargetClient::new(
+			params.target_client,
+			lane,
+			lane_id,
+			metrics_values,
+			params.source_to_target_headers_relay,
+		),
+		metrics_params,
+		futures::future::pending(),
+	)
+	.await
+	.map_err(Into::into)
+}
+
+/// Add standalone metrics for the Polkadot -> Kusama messages loop.
+pub(crate) fn add_standalone_metrics(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<Polkadot>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	let kusama_to_polkadot_conversion_rate_key = bp_runtime::storage_parameter_key(
+		bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME,
+	)
+	.0;
+
+	substrate_relay_helper::messages_lane::add_standalone_metrics::<PolkadotMessagesToKusama>(
+		metrics_prefix,
+		metrics_params,
+		source_client,
+		Some(crate::chains::kusama::TOKEN_ID),
+		Some(crate::chains::polkadot::TOKEN_ID),
+		Some((
+			sp_core::storage::StorageKey(kusama_to_polkadot_conversion_rate_key),
+			// starting the relay before this parameter is set to some value may cause troubles
+			FixedU128::from_inner(FixedU128::DIV),
+		)),
+	)
+}
+
+/// Update Kusama -> Polkadot conversion rate, stored in Polkadot runtime storage.
+pub(crate) async fn update_kusama_to_polkadot_conversion_rate(
+	client: Client<Polkadot>,
+	signer: <Polkadot as TransactionSignScheme>::AccountKeyPair,
+	updated_rate: f64,
+) -> anyhow::Result<()> {
+	let genesis_hash = *client.genesis_hash();
+	let signer_id = (*signer.public().as_array_ref()).into();
+	client
+		.submit_signed_extrinsic(signer_id, move |_, transaction_nonce| {
+			Bytes(
+				Polkadot::sign_transaction(
+					genesis_hash,
+					&signer,
+					relay_substrate_client::TransactionEra::immortal(),
+					UnsignedTransaction::new(
+						relay_polkadot_client::runtime::Call::BridgeKusamaMessages(
+							relay_polkadot_client::runtime::BridgeKusamaMessagesCall::update_pallet_parameter(
+								relay_polkadot_client::runtime::BridgeKusamaMessagesParameter::KusamaToPolkadotConversionRate(
+									sp_runtime::FixedU128::from_float(updated_rate),
+								)
+							)
+						),
+						transaction_nonce,
+					),
+				)
+				.encode(),
+			)
+		})
+		.await
+		.map(drop)
+		.map_err(|err| anyhow::format_err!("{:?}", err))
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs
index 9a6185b4fc7d9618e52188ae40a56878161f1e52..4c1a0166ed3b28c339ced3102a368cf3bc2a37b7 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs
@@ -19,8 +19,11 @@
 use crate::cli::{
 	bridge,
 	encode_call::{self, Call, CliEncodeCall},
-	encode_message, send_message, CliChain,
+	encode_message,
+	send_message::{self, DispatchFeePayment},
+	CliChain,
 };
+use anyhow::anyhow;
 use bp_message_dispatch::{CallOrigin, MessagePayload};
 use codec::Decode;
 use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight};
@@ -35,29 +38,32 @@ impl CliEncodeCall for Rialto {
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
 			Call::Raw { data } => Decode::decode(&mut &*data.0)?,
-			Call::Remark { remark_payload, .. } => rialto_runtime::Call::System(rialto_runtime::SystemCall::remark(
-				remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-			)),
-			Call::Transfer { recipient, amount } => {
-				rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer(recipient.raw_id(), amount.0))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::RIALTO_TO_MILLAU_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					rialto_runtime::Call::BridgeMillauMessages(rialto_runtime::MessagesCall::send_message(
-						lane.0, payload, fee.0,
-					))
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
-				),
-			},
+			Call::Remark { remark_payload, .. } =>
+				rialto_runtime::Call::System(rialto_runtime::SystemCall::remark {
+					remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				}),
+			Call::Transfer { recipient, amount } =>
+				rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer {
+					dest: recipient.raw_id().into(),
+					value: amount.0,
+				}),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::RIALTO_TO_MILLAU_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						rialto_runtime::Call::BridgeMillauMessages(
+							rialto_runtime::MessagesCall::send_message {
+								lane_id: lane.0,
+								payload,
+								delivery_and_dispatch_fee: fee.0,
+							},
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 		})
 	}
 
@@ -70,7 +76,12 @@ impl CliChain for Rialto {
 	const RUNTIME_VERSION: RuntimeVersion = rialto_runtime::VERSION;
 
 	type KeyPair = sp_core::sr25519::Pair;
-	type MessagePayload = MessagePayload<bp_rialto::AccountId, bp_millau::AccountSigner, bp_millau::Signature, Vec<u8>>;
+	type MessagePayload = MessagePayload<
+		bp_rialto::AccountId,
+		bp_millau::AccountSigner,
+		bp_millau::Signature,
+		Vec<u8>,
+	>;
 
 	fn ss58_format() -> u16 {
 		rialto_runtime::SS58Prefix::get() as u16
@@ -80,10 +91,12 @@ impl CliChain for Rialto {
 		bp_rialto::max_extrinsic_weight()
 	}
 
-	fn encode_message(message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
+	fn encode_message(
+		message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
 		match message {
 			encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0)
-				.map_err(|e| format!("Failed to decode Rialto's MessagePayload: {:?}", e)),
+				.map_err(|e| anyhow!("Failed to decode Rialto's MessagePayload: {:?}", e)),
 			encode_message::MessagePayload::Call { mut call, mut sender } => {
 				type Source = Rialto;
 				type Target = relay_millau_client::Millau;
@@ -91,12 +104,21 @@ impl CliChain for Rialto {
 				sender.enforce_chain::<Source>();
 				let spec_version = Target::RUNTIME_VERSION.spec_version;
 				let origin = CallOrigin::SourceAccount(sender.raw_id());
-				encode_call::preprocess_call::<Source, Target>(&mut call, bridge::RIALTO_TO_MILLAU_INDEX);
-				let call = Target::encode_call(&call).map_err(|e| e.to_string())?;
+				encode_call::preprocess_call::<Source, Target>(
+					&mut call,
+					bridge::RIALTO_TO_MILLAU_INDEX,
+				);
+				let call = Target::encode_call(&call)?;
 				let weight = call.get_dispatch_info().weight;
 
-				Ok(send_message::message_payload(spec_version, weight, origin, &call))
-			}
+				Ok(send_message::message_payload(
+					spec_version,
+					weight,
+					origin,
+					&call,
+					DispatchFeePayment::AtSourceChain,
+				))
+			},
 		}
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs
index 39295c89433e84a1d9d50e02534a8ec85c64461f..7e76f403c55aae1a9e26cb91b48e220b5874b6d3 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs
@@ -16,41 +16,72 @@
 
 //! Rialto-to-Millau headers sync entrypoint.
 
-use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use codec::Encode;
+use sp_core::{Bytes, Pair};
 
 use bp_header_chain::justification::GrandpaJustification;
-use codec::Encode;
 use relay_millau_client::{Millau, SigningParams as MillauSigningParams};
 use relay_rialto_client::{Rialto, SyncHeader as RialtoSyncHeader};
-use relay_substrate_client::{Chain, TransactionSignScheme};
-use sp_core::{Bytes, Pair};
+use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Rialto-to-Millau finality sync pipeline.
-pub(crate) type RialtoFinalityToMillau = SubstrateFinalityToSubstrate<Rialto, Millau, MillauSigningParams>;
+pub(crate) type FinalityPipelineRialtoFinalityToMillau =
+	SubstrateFinalityToSubstrate<Rialto, Millau, MillauSigningParams>;
+
+#[derive(Clone, Debug)]
+pub struct RialtoFinalityToMillau {
+	finality_pipeline: FinalityPipelineRialtoFinalityToMillau,
+}
+
+impl RialtoFinalityToMillau {
+	pub fn new(target_client: Client<Millau>, target_sign: MillauSigningParams) -> Self {
+		Self {
+			finality_pipeline: FinalityPipelineRialtoFinalityToMillau::new(
+				target_client,
+				target_sign,
+			),
+		}
+	}
+}
 
 impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau {
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
+	type FinalitySyncPipeline = FinalityPipelineRialtoFinalityToMillau;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
 
 	type TargetChain = Millau;
 
 	fn transactions_author(&self) -> bp_millau::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_submit_finality_proof_transaction(
 		&self,
-		transaction_nonce: <Millau as Chain>::Index,
+		era: bp_runtime::TransactionEraOf<Millau>,
+		transaction_nonce: IndexOf<Millau>,
 		header: RialtoSyncHeader,
 		proof: GrandpaJustification<bp_rialto::Header>,
 	) -> Bytes {
-		let call = millau_runtime::BridgeGrandpaRialtoCall::<
+		let call = millau_runtime::BridgeGrandpaCall::<
 			millau_runtime::Runtime,
 			millau_runtime::RialtoGrandpaInstance,
-		>::submit_finality_proof(header.into_inner(), proof)
+		>::submit_finality_proof {
+			finality_target: Box::new(header.into_inner()),
+			justification: proof,
+		}
 		.into();
 
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Millau::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 
 		Bytes(transaction.encode())
 	}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs
index 89f9dd7e997edffa57df4f1a48badfb775896531..50ebf264e1a49a37913d9d9eb08d8ca0af5cf2f0 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs
@@ -16,64 +16,100 @@
 
 //! Rialto-to-Millau messages sync entrypoint.
 
-use crate::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
-};
-use crate::messages_source::SubstrateMessagesSource;
-use crate::messages_target::SubstrateMessagesTarget;
+use std::ops::RangeInclusive;
 
-use bp_messages::MessageNonce;
-use bp_runtime::{MILLAU_CHAIN_ID, RIALTO_CHAIN_ID};
-use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use codec::Encode;
 use frame_support::dispatch::GetDispatchInfo;
-use messages_relay::message_lane::MessageLane;
-use relay_millau_client::{HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams};
-use relay_rialto_client::{HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams};
-use relay_substrate_client::{
-	metrics::{FloatStorageValueMetric, StorageProofOverheadMetric},
-	Chain, TransactionSignScheme,
-};
 use sp_core::{Bytes, Pair};
-use std::{ops::RangeInclusive, time::Duration};
+
+use bp_messages::MessageNonce;
+use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use frame_support::weights::Weight;
+use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy};
+use relay_millau_client::{
+	HeaderId as MillauHeaderId, Millau, SigningParams as MillauSigningParams,
+};
+use relay_rialto_client::{
+	HeaderId as RialtoHeaderId, Rialto, SigningParams as RialtoSigningParams,
+};
+use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
+	STALL_TIMEOUT,
+};
 
 /// Rialto-to-Millau message lane.
-pub type RialtoMessagesToMillau =
+pub type MessageLaneRialtoMessagesToMillau =
 	SubstrateMessageLaneToSubstrate<Rialto, RialtoSigningParams, Millau, MillauSigningParams>;
 
+#[derive(Clone)]
+pub struct RialtoMessagesToMillau {
+	message_lane: MessageLaneRialtoMessagesToMillau,
+}
+
 impl SubstrateMessageLane for RialtoMessagesToMillau {
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD;
+	type MessageLane = MessageLaneRialtoMessagesToMillau;
+
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_millau::TO_MILLAU_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_millau::TO_MILLAU_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rialto::FROM_RIALTO_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_rialto::FROM_RIALTO_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_rialto::FROM_RIALTO_UNREWARDED_RELAYERS_STATE;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD;
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_rialto::WITH_MILLAU_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_millau::WITH_RIALTO_MESSAGES_PALLET_NAME;
+
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Rialto;
 	type TargetChain = Millau;
 
 	fn source_transactions_author(&self) -> bp_rialto::AccountId {
-		(*self.source_sign.public().as_array_ref()).into()
+		(*self.message_lane.source_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_receiving_proof_transaction(
 		&self,
-		transaction_nonce: <Rialto as Chain>::Index,
+		best_block_id: RialtoHeaderId,
+		transaction_nonce: IndexOf<Rialto>,
 		_generated_at_block: MillauHeaderId,
-		proof: <Self as MessageLane>::MessagesReceivingProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
 	) -> Bytes {
 		let (relayers_state, proof) = proof;
 		let call: rialto_runtime::Call =
-			rialto_runtime::MessagesCall::receive_messages_delivery_proof(proof, relayers_state).into();
+			rialto_runtime::MessagesCall::receive_messages_delivery_proof { proof, relayers_state }
+				.into();
 		let call_weight = call.get_dispatch_info().weight;
-		let genesis_hash = *self.source_client.genesis_hash();
-		let transaction = Rialto::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.source_client.genesis_hash();
+		let transaction = Rialto::sign_transaction(
+			genesis_hash,
+			&self.message_lane.source_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.source_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Millau -> Rialto confirmation transaction. Weight: {}/{}, size: {}/{}",
@@ -86,33 +122,38 @@ impl SubstrateMessageLane for RialtoMessagesToMillau {
 	}
 
 	fn target_transactions_author(&self) -> bp_millau::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.message_lane.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_delivery_transaction(
 		&self,
-		transaction_nonce: <Millau as Chain>::Index,
+		best_block_id: MillauHeaderId,
+		transaction_nonce: IndexOf<Millau>,
 		_generated_at_header: RialtoHeaderId,
 		_nonces: RangeInclusive<MessageNonce>,
-		proof: <Self as MessageLane>::MessagesProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
-		let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof(
-			self.relayer_id_at_source.clone(),
+		let call: millau_runtime::Call = millau_runtime::MessagesCall::receive_messages_proof {
+			relayer_id_at_bridged_chain: self.message_lane.relayer_id_at_source.clone(),
 			proof,
-			messages_count as _,
+			messages_count: messages_count as _,
 			dispatch_weight,
-		)
+		}
 		.into();
 		let call_weight = call.get_dispatch_info().weight;
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.target_client.genesis_hash();
+		let transaction = Millau::sign_transaction(
+			genesis_hash,
+			&self.message_lane.target_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.target_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Rialto -> Millau delivery transaction. Weight: {}/{}, size: {}/{}",
@@ -126,34 +167,50 @@ impl SubstrateMessageLane for RialtoMessagesToMillau {
 }
 
 /// Rialto node as messages source.
-type RialtoSourceClient =
-	SubstrateMessagesSource<Rialto, RialtoMessagesToMillau, rialto_runtime::WithMillauMessagesInstance>;
+type RialtoSourceClient = SubstrateMessagesSource<RialtoMessagesToMillau>;
 
 /// Millau node as messages target.
-type MillauTargetClient =
-	SubstrateMessagesTarget<Millau, RialtoMessagesToMillau, millau_runtime::WithRialtoMessagesInstance>;
+type MillauTargetClient = SubstrateMessagesTarget<RialtoMessagesToMillau>;
 
 /// Run Rialto-to-Millau messages sync.
 pub async fn run(
-	params: MessagesRelayParams<Rialto, RialtoSigningParams, Millau, MillauSigningParams>,
-) -> Result<(), String> {
-	let stall_timeout = Duration::from_secs(5 * 60);
+	params: MessagesRelayParams<
+		Rialto,
+		RialtoSigningParams,
+		Millau,
+		MillauSigningParams,
+		MixStrategy,
+	>,
+) -> anyhow::Result<()> {
+	let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout(
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		Rialto::AVERAGE_BLOCK_INTERVAL,
+		Millau::AVERAGE_BLOCK_INTERVAL,
+		STALL_TIMEOUT,
+	);
 	let relayer_id_at_rialto = (*params.source_sign.public().as_array_ref()).into();
 
 	let lane_id = params.lane_id;
 	let source_client = params.source_client;
 	let lane = RialtoMessagesToMillau {
-		source_client: source_client.clone(),
-		source_sign: params.source_sign,
-		target_client: params.target_client.clone(),
-		target_sign: params.target_sign,
-		relayer_id_at_source: relayer_id_at_rialto,
+		message_lane: SubstrateMessageLaneToSubstrate {
+			source_client: source_client.clone(),
+			source_sign: params.source_sign,
+			source_transactions_mortality: params.source_transactions_mortality,
+			target_client: params.target_client.clone(),
+			target_sign: params.target_sign,
+			target_transactions_mortality: params.target_transactions_mortality,
+			relayer_id_at_source: relayer_id_at_rialto,
+		},
 	};
 
 	// 2/3 is reserved for proofs and tx overhead
 	let max_messages_size_in_single_batch = bp_millau::max_extrinsic_size() / 3;
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_millau::max_extrinsic_weight(),
 			bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
@@ -164,13 +221,25 @@ pub async fn run(
 			Rialto relayer account id: {:?}\n\t\
 			Max messages in single transaction: {}\n\t\
 			Max messages size in single transaction: {}\n\t\
-			Max messages weight in single transaction: {}",
-		lane.relayer_id_at_source,
+			Max messages weight in single transaction: {}\n\t\
+			Tx mortality: {:?}/{:?}\n\t\
+			Stall timeout: {:?}",
+		lane.message_lane.relayer_id_at_source,
 		max_messages_in_single_batch,
 		max_messages_size_in_single_batch,
 		max_messages_weight_in_single_batch,
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		stall_timeout,
 	);
 
+	let (metrics_params, metrics_values) = add_standalone_metrics(
+		Some(messages_relay::message_lane_loop::metrics_prefix::<
+			<RialtoMessagesToMillau as SubstrateMessageLane>::MessageLane,
+		>(&lane_id)),
+		params.metrics_params,
+		source_client.clone(),
+	)?;
 	messages_relay::message_lane_loop::run(
 		messages_relay::message_lane_loop::Params {
 			lane: lane_id,
@@ -179,58 +248,86 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_millau::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
-				relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic,
+				relay_strategy: params.relay_strategy,
 			},
 		},
 		RialtoSourceClient::new(
 			source_client.clone(),
 			lane.clone(),
 			lane_id,
-			MILLAU_CHAIN_ID,
 			params.target_to_source_headers_relay,
 		),
 		MillauTargetClient::new(
 			params.target_client,
 			lane,
 			lane_id,
-			RIALTO_CHAIN_ID,
+			metrics_values,
 			params.source_to_target_headers_relay,
 		),
-		relay_utils::relay_metrics(
-			Some(messages_relay::message_lane_loop::metrics_prefix::<
-				RialtoMessagesToMillau,
-			>(&lane_id)),
-			params.metrics_params,
-		)
-		.standalone_metric(|registry, prefix| {
-			StorageProofOverheadMetric::new(
-				registry,
-				prefix,
-				source_client.clone(),
-				"rialto_storage_proof_overhead".into(),
-				"Rialto storage proof overhead".into(),
-			)
-		})?
-		.standalone_metric(|registry, prefix| {
-			FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
-				registry,
-				prefix,
-				source_client,
-				sp_core::storage::StorageKey(
-					rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(),
-				),
-				Some(rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE),
-				"rialto_millau_to_rialto_conversion_rate".into(),
-				"Millau to Rialto tokens conversion rate (used by Millau)".into(),
-			)
-		})?
-		.into_params(),
+		metrics_params,
 		futures::future::pending(),
 	)
 	.await
+	.map_err(Into::into)
+}
+
+/// Add standalone metrics for the Rialto -> Millau messages loop.
+pub(crate) fn add_standalone_metrics(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<Rialto>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	substrate_relay_helper::messages_lane::add_standalone_metrics::<RialtoMessagesToMillau>(
+		metrics_prefix,
+		metrics_params,
+		source_client,
+		Some(crate::chains::RIALTO_ASSOCIATED_TOKEN_ID),
+		Some(crate::chains::MILLAU_ASSOCIATED_TOKEN_ID),
+		Some((
+			sp_core::storage::StorageKey(
+				rialto_runtime::millau_messages::MillauToRialtoConversionRate::key().to_vec(),
+			),
+			rialto_runtime::millau_messages::INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE,
+		)),
+	)
+}
+
+/// Update Millau -> Rialto conversion rate, stored in Rialto runtime storage.
+pub(crate) async fn update_millau_to_rialto_conversion_rate(
+	client: Client<Rialto>,
+	signer: <Rialto as TransactionSignScheme>::AccountKeyPair,
+	updated_rate: f64,
+) -> anyhow::Result<()> {
+	let genesis_hash = *client.genesis_hash();
+	let signer_id = (*signer.public().as_array_ref()).into();
+	client
+		.submit_signed_extrinsic(signer_id, move |_, transaction_nonce| {
+			Bytes(
+				Rialto::sign_transaction(
+					genesis_hash,
+					&signer,
+					relay_substrate_client::TransactionEra::immortal(),
+					UnsignedTransaction::new(
+						rialto_runtime::MessagesCall::update_pallet_parameter {
+							parameter: rialto_runtime::millau_messages::RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(
+								sp_runtime::FixedU128::from_float(updated_rate),
+							),
+						}
+						.into(),
+						transaction_nonce,
+					),
+				)
+				.encode(),
+			)
+		})
+		.await
+		.map(drop)
+		.map_err(|err| anyhow::format_err!("{:?}", err))
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs
new file mode 100644
index 0000000000000000000000000000000000000000..edd4ca36285406c9b69b8b7d34793f4ae30ea456
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs
@@ -0,0 +1,82 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Rialto parachain specification for CLI.
+
+use crate::cli::{
+	encode_call::{Call, CliEncodeCall},
+	encode_message, CliChain,
+};
+use bp_message_dispatch::MessagePayload;
+use codec::Decode;
+use frame_support::weights::{DispatchInfo, GetDispatchInfo, Weight};
+use relay_rialto_parachain_client::RialtoParachain;
+use sp_version::RuntimeVersion;
+
+impl CliEncodeCall for RialtoParachain {
+	fn max_extrinsic_size() -> u32 {
+		bp_rialto_parachain::max_extrinsic_size()
+	}
+
+	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
+		Ok(match call {
+			Call::Raw { data } => Decode::decode(&mut &*data.0)?,
+			Call::Remark { remark_payload, .. } => rialto_parachain_runtime::Call::System(
+				rialto_parachain_runtime::SystemCall::remark {
+					remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
+				},
+			),
+			Call::Transfer { recipient, amount } => rialto_parachain_runtime::Call::Balances(
+				rialto_parachain_runtime::BalancesCall::transfer {
+					dest: recipient.raw_id().into(),
+					value: amount.0,
+				},
+			),
+			Call::BridgeSendMessage { .. } =>
+				anyhow::bail!("Bridge messages are not (yet) supported here",),
+		})
+	}
+
+	fn get_dispatch_info(call: &rialto_parachain_runtime::Call) -> anyhow::Result<DispatchInfo> {
+		Ok(call.get_dispatch_info())
+	}
+}
+
+impl CliChain for RialtoParachain {
+	const RUNTIME_VERSION: RuntimeVersion = rialto_parachain_runtime::VERSION;
+
+	type KeyPair = sp_core::sr25519::Pair;
+	type MessagePayload = MessagePayload<
+		bp_rialto_parachain::AccountId,
+		bp_millau::AccountSigner,
+		bp_millau::Signature,
+		Vec<u8>,
+	>;
+
+	fn ss58_format() -> u16 {
+		rialto_parachain_runtime::SS58Prefix::get() as u16
+	}
+
+	fn max_extrinsic_weight() -> Weight {
+		bp_rialto_parachain::max_extrinsic_weight()
+	}
+
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
+		anyhow::bail!("Not supported")
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs
index ec94450a63de5f3acefe85f89af7d52ffa407998..4df60f89faa213679b60613f93279cba2100bb99 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
+use anyhow::anyhow;
 use codec::Decode;
 use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight};
 use relay_rococo_client::Rococo;
@@ -27,7 +28,7 @@ use crate::cli::{
 
 /// Weight of the `system::remark` call at Rococo.
 ///
-/// This weight is larger (x2) than actual weight at current Rooco runtime to avoid unsuccessful
+/// This weight is larger (x2) than actual weight at current Rococo runtime to avoid unsuccessful
 /// calls in the future. But since it is used only in tests (and on test chains), this is ok.
 pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000;
 
@@ -38,41 +39,41 @@ impl CliEncodeCall for Rococo {
 
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
-			Call::Remark { remark_payload, .. } => {
-				relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark(
+			Call::Remark { remark_payload, .. } => relay_rococo_client::runtime::Call::System(
+				relay_rococo_client::runtime::SystemCall::remark(
 					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-				))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::ROCOCO_TO_WOCOCO_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					relay_rococo_client::runtime::Call::BridgeMessagesWococo(
-						relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message(lane.0, payload, fee.0),
-					)
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
 				),
-			},
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::ROCOCO_TO_WOCOCO_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_rococo_client::runtime::Call::BridgeMessagesWococo(
+							relay_rococo_client::runtime::BridgeMessagesWococoCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 			_ => anyhow::bail!("The call is not supported"),
 		})
 	}
 
-	fn get_dispatch_info(call: &relay_rococo_client::runtime::Call) -> anyhow::Result<DispatchInfo> {
+	fn get_dispatch_info(
+		call: &relay_rococo_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
 		match *call {
-			relay_rococo_client::runtime::Call::System(relay_rococo_client::runtime::SystemCall::remark(_)) => {
-				Ok(DispatchInfo {
-					weight: SYSTEM_REMARK_CALL_WEIGHT,
-					class: DispatchClass::Normal,
-					pays_fee: Pays::Yes,
-				})
-			}
+			relay_rococo_client::runtime::Call::System(
+				relay_rococo_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
 			_ => anyhow::bail!("Unsupported Rococo call: {:?}", call),
 		}
 	}
@@ -92,7 +93,9 @@ impl CliChain for Rococo {
 		bp_wococo::max_extrinsic_weight()
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
-		Err("Sending messages from Rococo is not yet supported.".into())
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
+		Err(anyhow!("Sending messages from Rococo is not yet supported."))
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs
index c7f60100f13c12f938fc76bc574986343f2849a5..25fd97a90bab999327158b2801e3d261baed2673 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs
@@ -16,56 +16,91 @@
 
 //! Rococo-to-Wococo headers sync entrypoint.
 
-use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY;
-use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use codec::Encode;
+use sp_core::{Bytes, Pair};
 
 use bp_header_chain::justification::GrandpaJustification;
-use codec::Encode;
 use relay_rococo_client::{Rococo, SyncHeader as RococoSyncHeader};
-use relay_substrate_client::{Chain, TransactionSignScheme};
+use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use relay_wococo_client::{SigningParams as WococoSigningParams, Wococo};
-use sp_core::{Bytes, Pair};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
+
+use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY;
 
 /// Rococo-to-Wococo finality sync pipeline.
-pub(crate) type RococoFinalityToWococo = SubstrateFinalityToSubstrate<Rococo, Wococo, WococoSigningParams>;
+pub(crate) type FinalityPipelineRococoFinalityToWococo =
+	SubstrateFinalityToSubstrate<Rococo, Wococo, WococoSigningParams>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct RococoFinalityToWococo {
+	finality_pipeline: FinalityPipelineRococoFinalityToWococo,
+}
+
+impl RococoFinalityToWococo {
+	pub fn new(target_client: Client<Wococo>, target_sign: WococoSigningParams) -> Self {
+		Self {
+			finality_pipeline: FinalityPipelineRococoFinalityToWococo::new(
+				target_client,
+				target_sign,
+			),
+		}
+	}
+}
 
 impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo {
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
+	type FinalitySyncPipeline = FinalityPipelineRococoFinalityToWococo;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
 
 	type TargetChain = Wococo;
 
 	fn customize_metrics(params: MetricsParams) -> anyhow::Result<MetricsParams> {
-		crate::chains::add_polkadot_kusama_price_metrics::<Self>(params)
+		crate::chains::add_polkadot_kusama_price_metrics::<Self::FinalitySyncPipeline>(
+			Some(finality_relay::metrics_prefix::<Self::FinalitySyncPipeline>()),
+			params,
+		)
 	}
 
 	fn start_relay_guards(&self) {
 		relay_substrate_client::guard::abort_on_spec_version_change(
-			self.target_client.clone(),
+			self.finality_pipeline.target_client.clone(),
 			bp_wococo::VERSION.spec_version,
 		);
 		relay_substrate_client::guard::abort_when_account_balance_decreased(
-			self.target_client.clone(),
+			self.finality_pipeline.target_client.clone(),
 			self.transactions_author(),
 			MAXIMAL_BALANCE_DECREASE_PER_DAY,
 		);
 	}
 
 	fn transactions_author(&self) -> bp_wococo::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_submit_finality_proof_transaction(
 		&self,
-		transaction_nonce: <Wococo as Chain>::Index,
+		era: bp_runtime::TransactionEraOf<Wococo>,
+		transaction_nonce: IndexOf<Wococo>,
 		header: RococoSyncHeader,
 		proof: GrandpaJustification<bp_rococo::Header>,
 	) -> Bytes {
 		let call = relay_wococo_client::runtime::Call::BridgeGrandpaRococo(
-			relay_wococo_client::runtime::BridgeGrandpaRococoCall::submit_finality_proof(header.into_inner(), proof),
+			relay_wococo_client::runtime::BridgeGrandpaRococoCall::submit_finality_proof(
+				Box::new(header.into_inner()),
+				proof,
+			),
+		);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Wococo::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
 		);
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Wococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
 
 		Bytes(transaction.encode())
 	}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs
index be5f91116ec337daef2534da453312b2adb9a1dc..523d8c490859722024d905f092fec046ec42cb5c 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs
@@ -16,53 +16,83 @@
 
 //! Rococo-to-Wococo messages sync entrypoint.
 
-use crate::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
-};
-use crate::messages_source::SubstrateMessagesSource;
-use crate::messages_target::SubstrateMessagesTarget;
+use std::ops::RangeInclusive;
 
-use bp_messages::MessageNonce;
-use bp_runtime::{ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID};
-use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use codec::Encode;
-use messages_relay::message_lane::MessageLane;
-use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams};
-use relay_substrate_client::{metrics::StorageProofOverheadMetric, Chain, TransactionSignScheme};
-use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo};
 use sp_core::{Bytes, Pair};
-use std::{ops::RangeInclusive, time::Duration};
+
+use bp_messages::MessageNonce;
+use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use frame_support::weights::Weight;
+use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy};
+use relay_rococo_client::{
+	HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams,
+};
+use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use relay_wococo_client::{
+	HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo,
+};
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
+	STALL_TIMEOUT,
+};
 
 /// Rococo-to-Wococo message lane.
-pub type RococoMessagesToWococo =
+pub type MessageLaneRococoMessagesToWococo =
 	SubstrateMessageLaneToSubstrate<Rococo, RococoSigningParams, Wococo, WococoSigningParams>;
 
+#[derive(Clone)]
+pub struct RococoMessagesToWococo {
+	message_lane: MessageLaneRococoMessagesToWococo,
+}
+
 impl SubstrateMessageLane for RococoMessagesToWococo {
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD;
+	type MessageLane = MessageLaneRococoMessagesToWococo;
+
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_wococo::TO_WOCOCO_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_wococo::TO_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rococo::FROM_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_rococo::FROM_ROCOCO_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_rococo::FROM_ROCOCO_UNREWARDED_RELAYERS_STATE;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_rococo::WITH_WOCOCO_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_wococo::WITH_ROCOCO_MESSAGES_PALLET_NAME;
+
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_wococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Rococo;
 	type TargetChain = Wococo;
 
 	fn source_transactions_author(&self) -> bp_rococo::AccountId {
-		(*self.source_sign.public().as_array_ref()).into()
+		(*self.message_lane.source_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_receiving_proof_transaction(
 		&self,
-		transaction_nonce: <Rococo as Chain>::Index,
+		best_block_id: RococoHeaderId,
+		transaction_nonce: IndexOf<Rococo>,
 		_generated_at_block: WococoHeaderId,
-		proof: <Self as MessageLane>::MessagesReceivingProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
 	) -> Bytes {
 		let (relayers_state, proof) = proof;
 		let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo(
@@ -71,8 +101,16 @@ impl SubstrateMessageLane for RococoMessagesToWococo {
 				relayers_state,
 			),
 		);
-		let genesis_hash = *self.source_client.genesis_hash();
-		let transaction = Rococo::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.source_client.genesis_hash();
+		let transaction = Rococo::sign_transaction(
+			genesis_hash,
+			&self.message_lane.source_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.source_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Wococo -> Rococo confirmation transaction. Weight: <unknown>/{}, size: {}/{}",
@@ -84,34 +122,39 @@ impl SubstrateMessageLane for RococoMessagesToWococo {
 	}
 
 	fn target_transactions_author(&self) -> bp_wococo::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.message_lane.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_delivery_transaction(
 		&self,
-		transaction_nonce: <Wococo as Chain>::Index,
+		best_block_id: WococoHeaderId,
+		transaction_nonce: IndexOf<Wococo>,
 		_generated_at_header: RococoHeaderId,
 		_nonces: RangeInclusive<MessageNonce>,
-		proof: <Self as MessageLane>::MessagesProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 
 		let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo(
 			relay_wococo_client::runtime::BridgeMessagesRococoCall::receive_messages_proof(
-				self.relayer_id_at_source.clone(),
+				self.message_lane.relayer_id_at_source.clone(),
 				proof,
 				messages_count as _,
 				dispatch_weight,
 			),
 		);
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Wococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.target_client.genesis_hash();
+		let transaction = Wococo::sign_transaction(
+			genesis_hash,
+			&self.message_lane.target_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.target_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Rococo -> Wococo delivery transaction. Weight: <unknown>/{}, size: {}/{}",
@@ -124,28 +167,42 @@ impl SubstrateMessageLane for RococoMessagesToWococo {
 }
 
 /// Rococo node as messages source.
-type RococoSourceClient =
-	SubstrateMessagesSource<Rococo, RococoMessagesToWococo, relay_rococo_client::runtime::WithWococoMessagesInstance>;
+type RococoSourceClient = SubstrateMessagesSource<RococoMessagesToWococo>;
 
 /// Wococo node as messages target.
-type WococoTargetClient =
-	SubstrateMessagesTarget<Wococo, RococoMessagesToWococo, relay_wococo_client::runtime::WithRococoMessagesInstance>;
+type WococoTargetClient = SubstrateMessagesTarget<RococoMessagesToWococo>;
 
 /// Run Rococo-to-Wococo messages sync.
 pub async fn run(
-	params: MessagesRelayParams<Rococo, RococoSigningParams, Wococo, WococoSigningParams>,
-) -> Result<(), String> {
-	let stall_timeout = Duration::from_secs(5 * 60);
+	params: MessagesRelayParams<
+		Rococo,
+		RococoSigningParams,
+		Wococo,
+		WococoSigningParams,
+		MixStrategy,
+	>,
+) -> anyhow::Result<()> {
+	let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout(
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		Rococo::AVERAGE_BLOCK_INTERVAL,
+		Wococo::AVERAGE_BLOCK_INTERVAL,
+		STALL_TIMEOUT,
+	);
 	let relayer_id_at_rococo = (*params.source_sign.public().as_array_ref()).into();
 
 	let lane_id = params.lane_id;
 	let source_client = params.source_client;
 	let lane = RococoMessagesToWococo {
-		source_client: source_client.clone(),
-		source_sign: params.source_sign,
-		target_client: params.target_client.clone(),
-		target_sign: params.target_sign,
-		relayer_id_at_source: relayer_id_at_rococo,
+		message_lane: SubstrateMessageLaneToSubstrate {
+			source_client: source_client.clone(),
+			source_sign: params.source_sign,
+			source_transactions_mortality: params.source_transactions_mortality,
+			target_client: params.target_client.clone(),
+			target_sign: params.target_sign,
+			target_transactions_mortality: params.target_transactions_mortality,
+			relayer_id_at_source: relayer_id_at_rococo,
+		},
 	};
 
 	// 2/3 is reserved for proofs and tx overhead
@@ -153,14 +210,14 @@ pub async fn run(
 	// we don't know exact weights of the Wococo runtime. So to guess weights we'll be using
 	// weights from Rialto and then simply dividing it by x2.
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_wococo::max_extrinsic_weight(),
 			bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
-	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (
-		max_messages_in_single_batch / 2,
-		max_messages_weight_in_single_batch / 2,
-	);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
 
 	log::info!(
 		target: "bridge",
@@ -168,13 +225,25 @@ pub async fn run(
 			Rococo relayer account id: {:?}\n\t\
 			Max messages in single transaction: {}\n\t\
 			Max messages size in single transaction: {}\n\t\
-			Max messages weight in single transaction: {}",
-		lane.relayer_id_at_source,
+			Max messages weight in single transaction: {}\n\t\
+			Tx mortality: {:?}/{:?}\n\t\
+			Stall timeout: {:?}",
+		lane.message_lane.relayer_id_at_source,
 		max_messages_in_single_batch,
 		max_messages_size_in_single_batch,
 		max_messages_weight_in_single_batch,
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		stall_timeout,
 	);
 
+	let (metrics_params, metrics_values) = add_standalone_metrics(
+		Some(messages_relay::message_lane_loop::metrics_prefix::<
+			<RococoMessagesToWococo as SubstrateMessageLane>::MessageLane,
+		>(&lane_id)),
+		params.metrics_params,
+		source_client.clone(),
+	)?;
 	messages_relay::message_lane_loop::run(
 		messages_relay::message_lane_loop::Params {
 			lane: lane_id,
@@ -183,45 +252,48 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_wococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_wococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
-				relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic,
+				relay_strategy: params.relay_strategy,
 			},
 		},
 		RococoSourceClient::new(
 			source_client.clone(),
 			lane.clone(),
 			lane_id,
-			WOCOCO_CHAIN_ID,
 			params.target_to_source_headers_relay,
 		),
 		WococoTargetClient::new(
 			params.target_client,
 			lane,
 			lane_id,
-			ROCOCO_CHAIN_ID,
+			metrics_values,
 			params.source_to_target_headers_relay,
 		),
-		relay_utils::relay_metrics(
-			Some(messages_relay::message_lane_loop::metrics_prefix::<
-				RococoMessagesToWococo,
-			>(&lane_id)),
-			params.metrics_params,
-		)
-		.standalone_metric(|registry, prefix| {
-			StorageProofOverheadMetric::new(
-				registry,
-				prefix,
-				source_client.clone(),
-				"rococo_storage_proof_overhead".into(),
-				"Rococo storage proof overhead".into(),
-			)
-		})?
-		.into_params(),
+		metrics_params,
 		futures::future::pending(),
 	)
 	.await
+	.map_err(Into::into)
+}
+
+/// Add standalone metrics for the Rococo -> Wococo messages loop.
+pub(crate) fn add_standalone_metrics(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<Rococo>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	substrate_relay_helper::messages_lane::add_standalone_metrics::<RococoMessagesToWococo>(
+		metrics_prefix,
+		metrics_params,
+		source_client,
+		None,
+		None,
+		None,
+	)
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs
index 27621472d6d999e68bc3b6e6499880f70334a3d5..a42e4805512ca326802e8d4ad8c519f760f55091 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs
@@ -17,6 +17,7 @@
 //! Westend chain specification for CLI.
 
 use crate::cli::{encode_message, CliChain};
+use anyhow::anyhow;
 use frame_support::weights::Weight;
 use relay_westend_client::Westend;
 use sp_version::RuntimeVersion;
@@ -35,7 +36,9 @@ impl CliChain for Westend {
 		0
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
-		Err("Sending messages from Westend is not yet supported.".into())
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
+		Err(anyhow!("Sending messages from Westend is not yet supported."))
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs
index 1523dc1be584cfafd77e158be71e024cf393091a..64d8ba4d889d7fa5a537900a53cc6bc845d332b1 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs
@@ -16,46 +16,80 @@
 
 //! Westend-to-Millau headers sync entrypoint.
 
-use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use codec::Encode;
+use sp_core::{Bytes, Pair};
 
 use bp_header_chain::justification::GrandpaJustification;
-use codec::Encode;
 use relay_millau_client::{Millau, SigningParams as MillauSigningParams};
-use relay_substrate_client::{Chain, TransactionSignScheme};
+use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use relay_westend_client::{SyncHeader as WestendSyncHeader, Westend};
-use sp_core::{Bytes, Pair};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Westend-to-Millau finality sync pipeline.
-pub(crate) type WestendFinalityToMillau = SubstrateFinalityToSubstrate<Westend, Millau, MillauSigningParams>;
+pub(crate) type FinalityPipelineWestendFinalityToMillau =
+	SubstrateFinalityToSubstrate<Westend, Millau, MillauSigningParams>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct WestendFinalityToMillau {
+	finality_pipeline: FinalityPipelineWestendFinalityToMillau,
+}
+
+impl WestendFinalityToMillau {
+	pub fn new(target_client: Client<Millau>, target_sign: MillauSigningParams) -> Self {
+		Self {
+			finality_pipeline: FinalityPipelineWestendFinalityToMillau::new(
+				target_client,
+				target_sign,
+			),
+		}
+	}
+}
 
 impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau {
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD;
+	type FinalitySyncPipeline = FinalityPipelineWestendFinalityToMillau;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD;
 
 	type TargetChain = Millau;
 
 	fn customize_metrics(params: MetricsParams) -> anyhow::Result<MetricsParams> {
-		crate::chains::add_polkadot_kusama_price_metrics::<Self>(params)
+		crate::chains::add_polkadot_kusama_price_metrics::<Self::FinalitySyncPipeline>(
+			Some(finality_relay::metrics_prefix::<Self::FinalitySyncPipeline>()),
+			params,
+		)
 	}
 
 	fn transactions_author(&self) -> bp_millau::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_submit_finality_proof_transaction(
 		&self,
-		transaction_nonce: <Millau as Chain>::Index,
+		era: bp_runtime::TransactionEraOf<Millau>,
+		transaction_nonce: IndexOf<Millau>,
 		header: WestendSyncHeader,
 		proof: GrandpaJustification<bp_westend::Header>,
 	) -> Bytes {
-		let call = millau_runtime::BridgeGrandpaWestendCall::<
+		let call = millau_runtime::BridgeGrandpaCall::<
 			millau_runtime::Runtime,
 			millau_runtime::WestendGrandpaInstance,
-		>::submit_finality_proof(header.into_inner(), proof)
+		>::submit_finality_proof {
+			finality_target: Box::new(header.into_inner()),
+			justification: proof,
+		}
 		.into();
 
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Millau::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Millau::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 
 		Bytes(transaction.encode())
 	}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs
index 9b944d781685db098e2d41cf900990eada89c056..328397d14ba7c8cc771f8be21df1ba9cf03f7767 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs
@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
+use anyhow::anyhow;
 use codec::Decode;
 use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight};
 use relay_wococo_client::Wococo;
@@ -32,41 +33,41 @@ impl CliEncodeCall for Wococo {
 
 	fn encode_call(call: &Call) -> anyhow::Result<Self::Call> {
 		Ok(match call {
-			Call::Remark { remark_payload, .. } => {
-				relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark(
+			Call::Remark { remark_payload, .. } => relay_wococo_client::runtime::Call::System(
+				relay_wococo_client::runtime::SystemCall::remark(
 					remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(),
-				))
-			}
-			Call::BridgeSendMessage {
-				lane,
-				payload,
-				fee,
-				bridge_instance_index,
-			} => match *bridge_instance_index {
-				bridge::WOCOCO_TO_ROCOCO_INDEX => {
-					let payload = Decode::decode(&mut &*payload.0)?;
-					relay_wococo_client::runtime::Call::BridgeMessagesRococo(
-						relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message(lane.0, payload, fee.0),
-					)
-				}
-				_ => anyhow::bail!(
-					"Unsupported target bridge pallet with instance index: {}",
-					bridge_instance_index
 				),
-			},
+			),
+			Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } =>
+				match *bridge_instance_index {
+					bridge::WOCOCO_TO_ROCOCO_INDEX => {
+						let payload = Decode::decode(&mut &*payload.0)?;
+						relay_wococo_client::runtime::Call::BridgeMessagesRococo(
+							relay_wococo_client::runtime::BridgeMessagesRococoCall::send_message(
+								lane.0, payload, fee.0,
+							),
+						)
+					},
+					_ => anyhow::bail!(
+						"Unsupported target bridge pallet with instance index: {}",
+						bridge_instance_index
+					),
+				},
 			_ => anyhow::bail!("The call is not supported"),
 		})
 	}
 
-	fn get_dispatch_info(call: &relay_wococo_client::runtime::Call) -> anyhow::Result<DispatchInfo> {
+	fn get_dispatch_info(
+		call: &relay_wococo_client::runtime::Call,
+	) -> anyhow::Result<DispatchInfo> {
 		match *call {
-			relay_wococo_client::runtime::Call::System(relay_wococo_client::runtime::SystemCall::remark(_)) => {
-				Ok(DispatchInfo {
-					weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT,
-					class: DispatchClass::Normal,
-					pays_fee: Pays::Yes,
-				})
-			}
+			relay_wococo_client::runtime::Call::System(
+				relay_wococo_client::runtime::SystemCall::remark(_),
+			) => Ok(DispatchInfo {
+				weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT,
+				class: DispatchClass::Normal,
+				pays_fee: Pays::Yes,
+			}),
 			_ => anyhow::bail!("Unsupported Rococo call: {:?}", call),
 		}
 	}
@@ -86,7 +87,9 @@ impl CliChain for Wococo {
 		bp_wococo::max_extrinsic_weight()
 	}
 
-	fn encode_message(_message: encode_message::MessagePayload) -> Result<Self::MessagePayload, String> {
-		Err("Sending messages from Wococo is not yet supported.".into())
+	fn encode_message(
+		_message: encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload> {
+		Err(anyhow!("Sending messages from Wococo is not yet supported."))
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs
index 8ee30d3ff492581c08ea95d61684d34d234686e0..8e11698c1bb663ceb2fcd5c99017f0a250df50dc 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs
@@ -16,15 +16,17 @@
 
 //! Wococo-to-Rococo headers sync entrypoint.
 
-use crate::finality_pipeline::{SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate};
+use codec::Encode;
+use sp_core::{Bytes, Pair};
 
 use bp_header_chain::justification::GrandpaJustification;
-use codec::Encode;
 use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams};
-use relay_substrate_client::{Chain, TransactionSignScheme};
+use relay_substrate_client::{Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
 use relay_utils::metrics::MetricsParams;
 use relay_wococo_client::{SyncHeader as WococoSyncHeader, Wococo};
-use sp_core::{Bytes, Pair};
+use substrate_relay_helper::finality_pipeline::{
+	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate,
+};
 
 /// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat
 /// relay as gone wild.
@@ -34,44 +36,76 @@ use sp_core::{Bytes, Pair};
 pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_rococo::Balance = 1_500_000_000_000_000;
 
 /// Wococo-to-Rococo finality sync pipeline.
-pub(crate) type WococoFinalityToRococo = SubstrateFinalityToSubstrate<Wococo, Rococo, RococoSigningParams>;
+pub(crate) type FinalityPipelineWococoFinalityToRococo =
+	SubstrateFinalityToSubstrate<Wococo, Rococo, RococoSigningParams>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct WococoFinalityToRococo {
+	finality_pipeline: FinalityPipelineWococoFinalityToRococo,
+}
+
+impl WococoFinalityToRococo {
+	pub fn new(target_client: Client<Rococo>, target_sign: RococoSigningParams) -> Self {
+		Self {
+			finality_pipeline: FinalityPipelineWococoFinalityToRococo::new(
+				target_client,
+				target_sign,
+			),
+		}
+	}
+}
 
 impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo {
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
+	type FinalitySyncPipeline = FinalityPipelineWococoFinalityToRococo;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
 
 	type TargetChain = Rococo;
 
 	fn customize_metrics(params: MetricsParams) -> anyhow::Result<MetricsParams> {
-		crate::chains::add_polkadot_kusama_price_metrics::<Self>(params)
+		crate::chains::add_polkadot_kusama_price_metrics::<Self::FinalitySyncPipeline>(
+			Some(finality_relay::metrics_prefix::<Self::FinalitySyncPipeline>()),
+			params,
+		)
 	}
 
 	fn start_relay_guards(&self) {
 		relay_substrate_client::guard::abort_on_spec_version_change(
-			self.target_client.clone(),
+			self.finality_pipeline.target_client.clone(),
 			bp_rococo::VERSION.spec_version,
 		);
 		relay_substrate_client::guard::abort_when_account_balance_decreased(
-			self.target_client.clone(),
+			self.finality_pipeline.target_client.clone(),
 			self.transactions_author(),
 			MAXIMAL_BALANCE_DECREASE_PER_DAY,
 		);
 	}
 
 	fn transactions_author(&self) -> bp_rococo::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.finality_pipeline.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_submit_finality_proof_transaction(
 		&self,
-		transaction_nonce: <Rococo as Chain>::Index,
+		era: bp_runtime::TransactionEraOf<Rococo>,
+		transaction_nonce: IndexOf<Rococo>,
 		header: WococoSyncHeader,
 		proof: GrandpaJustification<bp_wococo::Header>,
 	) -> Bytes {
 		let call = relay_rococo_client::runtime::Call::BridgeGrandpaWococo(
-			relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof(header.into_inner(), proof),
+			relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof(
+				Box::new(header.into_inner()),
+				proof,
+			),
+		);
+		let genesis_hash = *self.finality_pipeline.target_client.genesis_hash();
+		let transaction = Rococo::sign_transaction(
+			genesis_hash,
+			&self.finality_pipeline.target_sign,
+			era,
+			UnsignedTransaction::new(call, transaction_nonce),
 		);
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
 
 		Bytes(transaction.encode())
 	}
@@ -80,36 +114,19 @@ impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo {
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use frame_support::weights::WeightToFeePolynomial;
-	use pallet_bridge_grandpa::weights::WeightInfo;
+	use crate::chains::kusama_headers_to_polkadot::tests::compute_maximal_balance_decrease_per_day;
 
 	#[test]
 	fn maximal_balance_decrease_per_day_is_sane() {
-		// Rococo/Wococo GRANDPA pallet weights. They're now using Rialto weights => using `RialtoWeight` is justified.
-		//
-		// Using Rialto runtime this is slightly incorrect, because `DbWeight` of Rococo/Wococo runtime may differ
-		// from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is the same.
-		type RococoGrandpaPalletWeights = pallet_bridge_grandpa::weights::RialtoWeight<rialto_runtime::Runtime>;
-
-		// The following formula shall not be treated as super-accurate - guard is to protect from mad relays,
-		// not to protect from over-average loses.
-		//
-		// Worst case: we're submitting proof for every source header. Since we submit every header, the number of
-		// headers in ancestry proof is near to 0 (let's round up to 2). And the number of authorities is 1024,
-		// which is (now) larger than on any existing chain => normally there'll be ~1024*2/3+1 commits.
-		const AVG_VOTES_ANCESTRIES_LEN: u32 = 2;
-		const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1;
-		let number_of_source_headers_per_day: bp_wococo::Balance = bp_wococo::DAYS as _;
-		let single_source_header_submit_call_weight =
-			RococoGrandpaPalletWeights::submit_finality_proof(AVG_VOTES_ANCESTRIES_LEN, AVG_PRECOMMITS_LEN);
-		// for simplicity - add extra weight for base tx fee + fee that is paid for the tx size + adjusted fee
-		let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2;
-		let single_source_header_tx_cost = bp_rococo::WeightToFee::calc(&single_source_header_submit_tx_weight);
-		let maximal_expected_decrease = single_source_header_tx_cost * number_of_source_headers_per_day;
+		// we expect Wococo -> Rococo relay to be running in all-headers mode
+		let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::<
+			bp_kusama::Balance,
+			bp_kusama::WeightToFee,
+		>(bp_wococo::DAYS);
 		assert!(
-			MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_expected_decrease,
+			MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease,
 			"Maximal expected loss per day {} is larger than hardcoded {}",
-			maximal_expected_decrease,
+			maximal_balance_decrease,
 			MAXIMAL_BALANCE_DECREASE_PER_DAY,
 		);
 	}
diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs
index b696801569e87f2c01c6ce0dd2cb46293bf123c4..893aeb607ab707bfcbd4918403ad6b437bc51376 100644
--- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs
@@ -16,53 +16,82 @@
 
 //! Wococo-to-Rococo messages sync entrypoint.
 
-use crate::messages_lane::{
-	select_delivery_transaction_limits, MessagesRelayParams, SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
-};
-use crate::messages_source::SubstrateMessagesSource;
-use crate::messages_target::SubstrateMessagesTarget;
+use std::ops::RangeInclusive;
 
-use bp_messages::MessageNonce;
-use bp_runtime::{ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID};
-use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
 use codec::Encode;
-use messages_relay::message_lane::MessageLane;
-use relay_rococo_client::{HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams};
-use relay_substrate_client::{metrics::StorageProofOverheadMetric, Chain, TransactionSignScheme};
-use relay_wococo_client::{HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo};
 use sp_core::{Bytes, Pair};
-use std::{ops::RangeInclusive, time::Duration};
+
+use bp_messages::MessageNonce;
+use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use frame_support::weights::Weight;
+use messages_relay::{message_lane::MessageLane, relay_strategy::MixStrategy};
+use relay_rococo_client::{
+	HeaderId as RococoHeaderId, Rococo, SigningParams as RococoSigningParams,
+};
+use relay_substrate_client::{Chain, Client, IndexOf, TransactionSignScheme, UnsignedTransaction};
+use relay_utils::metrics::MetricsParams;
+use relay_wococo_client::{
+	HeaderId as WococoHeaderId, SigningParams as WococoSigningParams, Wococo,
+};
+use substrate_relay_helper::{
+	messages_lane::{
+		select_delivery_transaction_limits, MessagesRelayParams, StandaloneMessagesMetrics,
+		SubstrateMessageLane, SubstrateMessageLaneToSubstrate,
+	},
+	messages_source::SubstrateMessagesSource,
+	messages_target::SubstrateMessagesTarget,
+	STALL_TIMEOUT,
+};
 
 /// Wococo-to-Rococo message lane.
-pub type WococoMessagesToRococo =
+pub type MessageLaneWococoMessagesToRococo =
 	SubstrateMessageLaneToSubstrate<Wococo, WococoSigningParams, Rococo, RococoSigningParams>;
 
+#[derive(Clone)]
+pub struct WococoMessagesToRococo {
+	message_lane: MessageLaneWococoMessagesToRococo,
+}
+
 impl SubstrateMessageLane for WococoMessagesToRococo {
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD;
+	type MessageLane = MessageLaneWococoMessagesToRococo;
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str =
+		bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD;
 	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str =
 		bp_rococo::TO_ROCOCO_LATEST_GENERATED_NONCE_METHOD;
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_rococo::TO_ROCOCO_LATEST_RECEIVED_NONCE_METHOD;
 
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str =
+		bp_wococo::FROM_WOCOCO_LATEST_RECEIVED_NONCE_METHOD;
 	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str =
 		bp_wococo::FROM_WOCOCO_LATEST_CONFIRMED_NONCE_METHOD;
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE;
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str =
+		bp_wococo::FROM_WOCOCO_UNREWARDED_RELAYERS_STATE;
+
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str =
+		bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str =
+		bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
 
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD;
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD;
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = bp_wococo::WITH_ROCOCO_MESSAGES_PALLET_NAME;
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = bp_rococo::WITH_WOCOCO_MESSAGES_PALLET_NAME;
+
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight =
+		bp_rococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT;
 
 	type SourceChain = Wococo;
 	type TargetChain = Rococo;
 
 	fn source_transactions_author(&self) -> bp_wococo::AccountId {
-		(*self.source_sign.public().as_array_ref()).into()
+		(*self.message_lane.source_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_receiving_proof_transaction(
 		&self,
-		transaction_nonce: <Wococo as Chain>::Index,
+		best_block_id: WococoHeaderId,
+		transaction_nonce: IndexOf<Wococo>,
 		_generated_at_block: RococoHeaderId,
-		proof: <Self as MessageLane>::MessagesReceivingProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
 	) -> Bytes {
 		let (relayers_state, proof) = proof;
 		let call = relay_wococo_client::runtime::Call::BridgeMessagesRococo(
@@ -71,8 +100,16 @@ impl SubstrateMessageLane for WococoMessagesToRococo {
 				relayers_state,
 			),
 		);
-		let genesis_hash = *self.source_client.genesis_hash();
-		let transaction = Wococo::sign_transaction(genesis_hash, &self.source_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.source_client.genesis_hash();
+		let transaction = Wococo::sign_transaction(
+			genesis_hash,
+			&self.message_lane.source_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.source_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Rococo -> Wococo confirmation transaction. Weight: <unknown>/{}, size: {}/{}",
@@ -84,34 +121,39 @@ impl SubstrateMessageLane for WococoMessagesToRococo {
 	}
 
 	fn target_transactions_author(&self) -> bp_rococo::AccountId {
-		(*self.target_sign.public().as_array_ref()).into()
+		(*self.message_lane.target_sign.public().as_array_ref()).into()
 	}
 
 	fn make_messages_delivery_transaction(
 		&self,
-		transaction_nonce: <Rococo as Chain>::Index,
+		best_block_id: WococoHeaderId,
+		transaction_nonce: IndexOf<Rococo>,
 		_generated_at_header: WococoHeaderId,
 		_nonces: RangeInclusive<MessageNonce>,
-		proof: <Self as MessageLane>::MessagesProof,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
 	) -> Bytes {
 		let (dispatch_weight, proof) = proof;
-		let FromBridgedChainMessagesProof {
-			ref nonces_start,
-			ref nonces_end,
-			..
-		} = proof;
+		let FromBridgedChainMessagesProof { ref nonces_start, ref nonces_end, .. } = proof;
 		let messages_count = nonces_end - nonces_start + 1;
 
 		let call = relay_rococo_client::runtime::Call::BridgeMessagesWococo(
 			relay_rococo_client::runtime::BridgeMessagesWococoCall::receive_messages_proof(
-				self.relayer_id_at_source.clone(),
+				self.message_lane.relayer_id_at_source.clone(),
 				proof,
 				messages_count as _,
 				dispatch_weight,
 			),
 		);
-		let genesis_hash = *self.target_client.genesis_hash();
-		let transaction = Rococo::sign_transaction(genesis_hash, &self.target_sign, transaction_nonce, call);
+		let genesis_hash = *self.message_lane.target_client.genesis_hash();
+		let transaction = Rococo::sign_transaction(
+			genesis_hash,
+			&self.message_lane.target_sign,
+			relay_substrate_client::TransactionEra::new(
+				best_block_id,
+				self.message_lane.target_transactions_mortality,
+			),
+			UnsignedTransaction::new(call, transaction_nonce),
+		);
 		log::trace!(
 			target: "bridge",
 			"Prepared Wococo -> Rococo delivery transaction. Weight: <unknown>/{}, size: {}/{}",
@@ -124,28 +166,42 @@ impl SubstrateMessageLane for WococoMessagesToRococo {
 }
 
 /// Wococo node as messages source.
-type WococoSourceClient =
-	SubstrateMessagesSource<Wococo, WococoMessagesToRococo, relay_wococo_client::runtime::WithRococoMessagesInstance>;
+type WococoSourceClient = SubstrateMessagesSource<WococoMessagesToRococo>;
 
 /// Rococo node as messages target.
-type RococoTargetClient =
-	SubstrateMessagesTarget<Rococo, WococoMessagesToRococo, relay_rococo_client::runtime::WithWococoMessagesInstance>;
+type RococoTargetClient = SubstrateMessagesTarget<WococoMessagesToRococo>;
 
 /// Run Wococo-to-Rococo messages sync.
 pub async fn run(
-	params: MessagesRelayParams<Wococo, WococoSigningParams, Rococo, RococoSigningParams>,
-) -> Result<(), String> {
-	let stall_timeout = Duration::from_secs(5 * 60);
+	params: MessagesRelayParams<
+		Wococo,
+		WococoSigningParams,
+		Rococo,
+		RococoSigningParams,
+		MixStrategy,
+	>,
+) -> anyhow::Result<()> {
+	let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout(
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		Wococo::AVERAGE_BLOCK_INTERVAL,
+		Rococo::AVERAGE_BLOCK_INTERVAL,
+		STALL_TIMEOUT,
+	);
 	let relayer_id_at_wococo = (*params.source_sign.public().as_array_ref()).into();
 
 	let lane_id = params.lane_id;
 	let source_client = params.source_client;
 	let lane = WococoMessagesToRococo {
-		source_client: source_client.clone(),
-		source_sign: params.source_sign,
-		target_client: params.target_client.clone(),
-		target_sign: params.target_sign,
-		relayer_id_at_source: relayer_id_at_wococo,
+		message_lane: SubstrateMessageLaneToSubstrate {
+			source_client: source_client.clone(),
+			source_sign: params.source_sign,
+			source_transactions_mortality: params.source_transactions_mortality,
+			target_client: params.target_client.clone(),
+			target_sign: params.target_sign,
+			target_transactions_mortality: params.target_transactions_mortality,
+			relayer_id_at_source: relayer_id_at_wococo,
+		},
 	};
 
 	// 2/3 is reserved for proofs and tx overhead
@@ -153,14 +209,14 @@ pub async fn run(
 	// we don't know exact weights of the Rococo runtime. So to guess weights we'll be using
 	// weights from Rialto and then simply dividing it by x2.
 	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
-		select_delivery_transaction_limits::<pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>>(
+		select_delivery_transaction_limits::<
+			pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>,
+		>(
 			bp_rococo::max_extrinsic_weight(),
 			bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
 		);
-	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = (
-		max_messages_in_single_batch / 2,
-		max_messages_weight_in_single_batch / 2,
-	);
+	let (max_messages_in_single_batch, max_messages_weight_in_single_batch) =
+		(max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2);
 
 	log::info!(
 		target: "bridge",
@@ -168,13 +224,25 @@ pub async fn run(
 			Wococo relayer account id: {:?}\n\t\
 			Max messages in single transaction: {}\n\t\
 			Max messages size in single transaction: {}\n\t\
-			Max messages weight in single transaction: {}",
-		lane.relayer_id_at_source,
+			Max messages weight in single transaction: {}\n\t\
+			Tx mortality: {:?}/{:?}\n\t\
+			Stall timeout: {:?}",
+		lane.message_lane.relayer_id_at_source,
 		max_messages_in_single_batch,
 		max_messages_size_in_single_batch,
 		max_messages_weight_in_single_batch,
+		params.source_transactions_mortality,
+		params.target_transactions_mortality,
+		stall_timeout,
 	);
 
+	let (metrics_params, metrics_values) = add_standalone_metrics(
+		Some(messages_relay::message_lane_loop::metrics_prefix::<
+			<WococoMessagesToRococo as SubstrateMessageLane>::MessageLane,
+		>(&lane_id)),
+		params.metrics_params,
+		source_client.clone(),
+	)?;
 	messages_relay::message_lane_loop::run(
 		messages_relay::message_lane_loop::Params {
 			lane: lane_id,
@@ -183,45 +251,48 @@ pub async fn run(
 			reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY,
 			stall_timeout,
 			delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams {
-				max_unrewarded_relayer_entries_at_target: bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-				max_unconfirmed_nonces_at_target: bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
+				max_unrewarded_relayer_entries_at_target:
+					bp_rococo::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+				max_unconfirmed_nonces_at_target:
+					bp_rococo::MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE,
 				max_messages_in_single_batch,
 				max_messages_weight_in_single_batch,
 				max_messages_size_in_single_batch,
-				relayer_mode: messages_relay::message_lane_loop::RelayerMode::Altruistic,
+				relay_strategy: params.relay_strategy,
 			},
 		},
 		WococoSourceClient::new(
 			source_client.clone(),
 			lane.clone(),
 			lane_id,
-			ROCOCO_CHAIN_ID,
 			params.target_to_source_headers_relay,
 		),
 		RococoTargetClient::new(
 			params.target_client,
 			lane,
 			lane_id,
-			WOCOCO_CHAIN_ID,
+			metrics_values,
 			params.source_to_target_headers_relay,
 		),
-		relay_utils::relay_metrics(
-			Some(messages_relay::message_lane_loop::metrics_prefix::<
-				WococoMessagesToRococo,
-			>(&lane_id)),
-			params.metrics_params,
-		)
-		.standalone_metric(|registry, prefix| {
-			StorageProofOverheadMetric::new(
-				registry,
-				prefix,
-				source_client.clone(),
-				"wococo_storage_proof_overhead".into(),
-				"Wococo storage proof overhead".into(),
-			)
-		})?
-		.into_params(),
+		metrics_params,
 		futures::future::pending(),
 	)
 	.await
+	.map_err(Into::into)
+}
+
+/// Add standalone metrics for the Wococo -> Rococo messages loop.
+pub(crate) fn add_standalone_metrics(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<Wococo>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	substrate_relay_helper::messages_lane::add_standalone_metrics::<WococoMessagesToRococo>(
+		metrics_prefix,
+		metrics_params,
+		source_client,
+		None,
+		None,
+		None,
+	)
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs
index 1feb3dcb1a46d25c82dcf6c80159c5b3de5bb396..1af6142c53eca9e1eff7768b8a6083b6c2685921 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs
@@ -14,17 +14,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use structopt::clap::arg_enum;
-
-arg_enum! {
-	#[derive(Debug, PartialEq, Eq)]
-	/// Supported full bridges (headers + messages).
-	pub enum FullBridge {
-		MillauToRialto,
-		RialtoToMillau,
-		RococoToWococo,
-		WococoToRococo,
-	}
+use strum::{EnumString, EnumVariantNames};
+
+#[derive(Debug, PartialEq, Eq, EnumString, EnumVariantNames)]
+#[strum(serialize_all = "kebab_case")]
+/// Supported full bridges (headers + messages).
+pub enum FullBridge {
+	MillauToRialto,
+	RialtoToMillau,
+	RococoToWococo,
+	WococoToRococo,
+	KusamaToPolkadot,
+	PolkadotToKusama,
 }
 
 impl FullBridge {
@@ -35,6 +36,8 @@ impl FullBridge {
 			Self::RialtoToMillau => RIALTO_TO_MILLAU_INDEX,
 			Self::RococoToWococo => ROCOCO_TO_WOCOCO_INDEX,
 			Self::WococoToRococo => WOCOCO_TO_ROCOCO_INDEX,
+			Self::KusamaToPolkadot => KUSAMA_TO_POLKADOT_INDEX,
+			Self::PolkadotToKusama => POLKADOT_TO_KUSAMA_INDEX,
 		}
 	}
 }
@@ -43,6 +46,8 @@ pub const RIALTO_TO_MILLAU_INDEX: u8 = 0;
 pub const MILLAU_TO_RIALTO_INDEX: u8 = 0;
 pub const ROCOCO_TO_WOCOCO_INDEX: u8 = 0;
 pub const WOCOCO_TO_ROCOCO_INDEX: u8 = 0;
+pub const KUSAMA_TO_POLKADOT_INDEX: u8 = 0;
+pub const POLKADOT_TO_KUSAMA_INDEX: u8 = 0;
 
 /// The macro allows executing bridge-specific code without going fully generic.
 ///
@@ -139,6 +144,50 @@ macro_rules! select_full_bridge {
 				#[allow(unused_imports)]
 				use relay_wococo_client::runtime::wococo_to_rococo_account_ownership_digest as account_ownership_digest;
 
+				$generic
+			}
+			FullBridge::KusamaToPolkadot => {
+				type Source = relay_kusama_client::Kusama;
+				#[allow(dead_code)]
+				type Target = relay_polkadot_client::Polkadot;
+
+				// Derive-account
+				#[allow(unused_imports)]
+				use bp_polkadot::derive_account_from_kusama_id as derive_account;
+
+				// Relay-messages
+				#[allow(unused_imports)]
+				use crate::chains::kusama_messages_to_polkadot::run as relay_messages;
+
+				// Send-message / Estimate-fee
+				#[allow(unused_imports)]
+				use bp_polkadot::TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD;
+				// Send-message
+				#[allow(unused_imports)]
+				use relay_kusama_client::runtime::kusama_to_polkadot_account_ownership_digest as account_ownership_digest;
+
+				$generic
+			}
+			FullBridge::PolkadotToKusama => {
+				type Source = relay_polkadot_client::Polkadot;
+				#[allow(dead_code)]
+				type Target = relay_kusama_client::Kusama;
+
+				// Derive-account
+				#[allow(unused_imports)]
+				use bp_kusama::derive_account_from_polkadot_id as derive_account;
+
+				// Relay-messages
+				#[allow(unused_imports)]
+				use crate::chains::polkadot_messages_to_kusama::run as relay_messages;
+
+				// Send-message / Estimate-fee
+				#[allow(unused_imports)]
+				use bp_kusama::TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD;
+				// Send-message
+				#[allow(unused_imports)]
+				use relay_polkadot_client::runtime::polkadot_to_kusama_account_ownership_digest as account_ownership_digest;
+
 				$generic
 			}
 		}
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs
index c7ac8761f133bba5b9c722041c54bf1a24d2f264..5b809eb69f22237ebf0505861e2ffbbcf73eba9b 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs
@@ -14,10 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{bridge::FullBridge, AccountId};
-use crate::select_full_bridge;
+use crate::{
+	cli::{bridge::FullBridge, AccountId},
+	select_full_bridge,
+};
 use relay_substrate_client::Chain;
 use structopt::StructOpt;
+use strum::VariantNames;
 
 /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain.
 ///
@@ -28,7 +31,7 @@ use structopt::StructOpt;
 #[derive(StructOpt)]
 pub struct DeriveAccount {
 	/// A bridge instance to initialize.
-	#[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)]
 	bridge: FullBridge,
 	/// Source-chain address to derive Target-chain address from.
 	account: AccountId,
@@ -54,11 +57,7 @@ impl DeriveAccount {
 		select_full_bridge!(self.bridge, {
 			let (account, derived_account) = self.derive_account();
 			println!("Source address:\n{} ({})", account, Source::NAME);
-			println!(
-				"->Corresponding (derived) address:\n{} ({})",
-				derived_account,
-				Target::NAME,
-			);
+			println!("->Corresponding (derived) address:\n{} ({})", derived_account, Target::NAME,);
 
 			Ok(())
 		})
@@ -80,9 +79,9 @@ mod tests {
 		let millau = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9";
 
 		// when
-		let (rialto_parsed, rialto_derived) = derive_account_cli("RialtoToMillau", rialto);
-		let (millau_parsed, millau_derived) = derive_account_cli("MillauToRialto", millau);
-		let (millau2_parsed, millau2_derived) = derive_account_cli("MillauToRialto", rialto);
+		let (rialto_parsed, rialto_derived) = derive_account_cli("rialto-to-millau", rialto);
+		let (millau_parsed, millau_derived) = derive_account_cli("millau-to-rialto", millau);
+		let (millau2_parsed, millau2_derived) = derive_account_cli("millau-to-rialto", rialto);
 
 		// then
 		assert_eq!(format!("{}", rialto_parsed), rialto);
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs
index cfe6d99a4eb9f76988845b34342b66368e6d45cd..f496f78b29d2bc4dde3447aaec0f3d26a3d8001a 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs
@@ -14,18 +14,22 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::{AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId};
-use crate::select_full_bridge;
+use crate::{
+	cli::{
+		bridge::FullBridge, AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId,
+	},
+	select_full_bridge,
+};
 use frame_support::weights::DispatchInfo;
 use relay_substrate_client::Chain;
 use structopt::StructOpt;
+use strum::VariantNames;
 
 /// Encode source chain runtime call.
 #[derive(StructOpt, Debug)]
 pub struct EncodeCall {
 	/// A bridge instance to encode call for.
-	#[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)]
 	bridge: FullBridge,
 	#[structopt(flatten)]
 	call: Call,
@@ -125,31 +129,30 @@ pub(crate) fn preprocess_call<Source: CliEncodeCall + CliChain, Target: CliEncod
 	bridge_instance: u8,
 ) {
 	match *call {
-		Call::Raw { .. } => {}
-		Call::Remark {
-			ref remark_size,
-			ref mut remark_payload,
-		} => {
+		Call::Raw { .. } => {},
+		Call::Remark { ref remark_size, ref mut remark_payload } =>
 			if remark_payload.is_none() {
 				*remark_payload = Some(HexBytes(generate_remark_payload(
 					remark_size,
-					compute_maximal_message_arguments_size(Source::max_extrinsic_size(), Target::max_extrinsic_size()),
+					compute_maximal_message_arguments_size(
+						Source::max_extrinsic_size(),
+						Target::max_extrinsic_size(),
+					),
 				)));
-			}
-		}
+			},
 		Call::Transfer { ref mut recipient, .. } => {
 			recipient.enforce_chain::<Source>();
-		}
-		Call::BridgeSendMessage {
-			ref mut bridge_instance_index,
-			..
-		} => {
+		},
+		Call::BridgeSendMessage { ref mut bridge_instance_index, .. } => {
 			*bridge_instance_index = bridge_instance;
-		}
+		},
 	};
 }
 
-fn generate_remark_payload(remark_size: &Option<ExplicitOrMaximal<usize>>, maximal_allowed_size: u32) -> Vec<u8> {
+fn generate_remark_payload(
+	remark_size: &Option<ExplicitOrMaximal<usize>>,
+	maximal_allowed_size: u32,
+) -> Vec<u8> {
 	match remark_size {
 		Some(ExplicitOrMaximal::Explicit(remark_size)) => vec![0; *remark_size],
 		Some(ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _],
@@ -171,9 +174,11 @@ pub(crate) fn compute_maximal_message_arguments_size(
 ) -> u32 {
 	// assume that both signed extensions and other arguments fit 1KB
 	let service_tx_bytes_on_source_chain = 1024;
-	let maximal_source_extrinsic_size = maximal_source_extrinsic_size - service_tx_bytes_on_source_chain;
-	let maximal_call_size =
-		bridge_runtime_common::messages::target::maximal_incoming_message_size(maximal_target_extrinsic_size);
+	let maximal_source_extrinsic_size =
+		maximal_source_extrinsic_size - service_tx_bytes_on_source_chain;
+	let maximal_call_size = bridge_runtime_common::messages::target::maximal_incoming_message_size(
+		maximal_target_extrinsic_size,
+	);
 	let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size {
 		maximal_source_extrinsic_size
 	} else {
@@ -188,13 +193,14 @@ pub(crate) fn compute_maximal_message_arguments_size(
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use crate::cli::send_message::SendMessage;
 
 	#[test]
 	fn should_encode_transfer_call() {
 		// given
 		let mut encode_call = EncodeCall::from_iter(vec![
 			"encode-call",
-			"RialtoToMillau",
+			"rialto-to-millau",
 			"transfer",
 			"--amount",
 			"12345",
@@ -208,20 +214,21 @@ mod tests {
 		// then
 		assert_eq!(
 			format!("{:?}", hex),
-			"0x0c00d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0"
+			"0x040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0"
 		);
 	}
 
 	#[test]
 	fn should_encode_remark_with_default_payload() {
 		// given
-		let mut encode_call = EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark"]);
+		let mut encode_call =
+			EncodeCall::from_iter(vec!["encode-call", "rialto-to-millau", "remark"]);
 
 		// when
 		let hex = encode_call.encode().unwrap();
 
 		// then
-		assert!(format!("{:?}", hex).starts_with("0x070154556e69782074696d653a"));
+		assert!(format!("{:?}", hex).starts_with("0x000154556e69782074696d653a"));
 	}
 
 	#[test]
@@ -229,7 +236,7 @@ mod tests {
 		// given
 		let mut encode_call = EncodeCall::from_iter(vec![
 			"encode-call",
-			"RialtoToMillau",
+			"rialto-to-millau",
 			"remark",
 			"--remark-payload",
 			"1234",
@@ -239,20 +246,25 @@ mod tests {
 		let hex = encode_call.encode().unwrap();
 
 		// then
-		assert_eq!(format!("{:?}", hex), "0x0701081234");
+		assert_eq!(format!("{:?}", hex), "0x0001081234");
 	}
 
 	#[test]
 	fn should_encode_remark_with_size() {
 		// given
-		let mut encode_call =
-			EncodeCall::from_iter(vec!["encode-call", "RialtoToMillau", "remark", "--remark-size", "12"]);
+		let mut encode_call = EncodeCall::from_iter(vec![
+			"encode-call",
+			"rialto-to-millau",
+			"remark",
+			"--remark-size",
+			"12",
+		]);
 
 		// when
 		let hex = encode_call.encode().unwrap();
 
 		// then
-		assert_eq!(format!("{:?}", hex), "0x070130000000000000000000000000");
+		assert_eq!(format!("{:?}", hex), "0x000130000000000000000000000000");
 	}
 
 	#[test]
@@ -260,7 +272,7 @@ mod tests {
 		// when
 		let err = EncodeCall::from_iter_safe(vec![
 			"encode-call",
-			"RialtoToMillau",
+			"rialto-to-millau",
 			"remark",
 			"--remark-payload",
 			"1234",
@@ -273,6 +285,68 @@ mod tests {
 		assert_eq!(err.kind, structopt::clap::ErrorKind::ArgumentConflict);
 
 		let info = err.info.unwrap();
-		assert!(info.contains(&"remark-payload".to_string()) | info.contains(&"remark-size".to_string()))
+		assert!(
+			info.contains(&"remark-payload".to_string()) |
+				info.contains(&"remark-size".to_string())
+		)
+	}
+
+	#[test]
+	fn should_encode_raw_call() {
+		// given
+		let mut encode_call = EncodeCall::from_iter(vec![
+			"encode-call",
+			"rialto-to-millau",
+			"raw",
+			"040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0",
+		]);
+
+		// when
+		let hex = encode_call.encode().unwrap();
+
+		// then
+		assert_eq!(
+			format!("{:?}", hex),
+			"0x040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0"
+		);
+	}
+
+	#[test]
+	fn should_encode_bridge_send_message_call() {
+		// given
+		let encode_message = SendMessage::from_iter(vec![
+			"send-message",
+			"millau-to-rialto",
+			"--source-port",
+			"10946",
+			"--source-signer",
+			"//Alice",
+			"--target-signer",
+			"//Alice",
+			"--origin",
+			"Target",
+			"remark",
+		])
+		.encode_payload()
+		.unwrap();
+
+		let mut encode_call = EncodeCall::from_iter(vec![
+			"encode-call",
+			"rialto-to-millau",
+			"bridge-send-message",
+			"--fee",
+			"12345",
+			"--payload",
+			format!("{:}", &HexBytes::encode(&encode_message)).as_str(),
+		]);
+
+		// when
+		let call_hex = encode_call.encode().unwrap();
+
+		// then
+		assert!(format!("{:?}", call_hex).starts_with(
+			"0x10030000000001000000381409000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\
+			de39a5684e7a56da27d01d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01"
+		))
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs
index 79b7b9a42cd37913feaa87ffa4c9eeea53c5c55d..98e1269aa68e6002ba463503e2b960cdf14c4814 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs
@@ -14,9 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{bridge::FullBridge, AccountId, CliChain, HexBytes};
-use crate::select_full_bridge;
+use crate::{
+	cli::{bridge::FullBridge, AccountId, CliChain, HexBytes},
+	select_full_bridge,
+};
 use structopt::StructOpt;
+use strum::VariantNames;
 
 /// Generic message payload.
 #[derive(StructOpt, Debug, PartialEq, Eq)]
@@ -41,7 +44,7 @@ pub enum MessagePayload {
 #[derive(StructOpt)]
 pub struct EncodeMessage {
 	/// A bridge instance to initialize.
-	#[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)]
 	bridge: FullBridge,
 	#[structopt(flatten)]
 	payload: MessagePayload,
@@ -51,7 +54,8 @@ impl EncodeMessage {
 	/// Run the command.
 	pub fn encode(self) -> anyhow::Result<HexBytes> {
 		select_full_bridge!(self.bridge, {
-			let payload = Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?;
+			let payload =
+				Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?;
 			Ok(HexBytes::encode(&payload))
 		})
 	}
@@ -73,7 +77,8 @@ mod tests {
 	fn should_encode_raw_message() {
 		// given
 		let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000";
-		let encode_message = EncodeMessage::from_iter(vec!["encode-message", "MillauToRialto", "raw", msg]);
+		let encode_message =
+			EncodeMessage::from_iter(vec!["encode-message", "rialto-to-millau", "raw", msg]);
 
 		// when
 		let hex = encode_message.encode().unwrap();
@@ -88,7 +93,7 @@ mod tests {
 		let sender = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check();
 		let encode_message = EncodeMessage::from_iter(vec![
 			"encode-message",
-			"RialtoToMillau",
+			"rialto-to-millau",
 			"call",
 			"--sender",
 			&sender,
@@ -101,6 +106,6 @@ mod tests {
 		let hex = encode_message.encode().unwrap();
 
 		// then
-		assert_eq!(format!("{:?}", hex), "0x01000000b0d60f000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000");
+		assert_eq!(format!("{:?}", hex), "0x0100000010f108000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c000130000000000000000000000000");
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs
index 129699c26917c17b826c5f346a93b3234268f3cd..d063ce544cd243099711c9c0b5b048c5b8af90e7 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs
@@ -14,18 +14,21 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::{Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams};
-use crate::select_full_bridge;
+use crate::{
+	cli::{bridge::FullBridge, Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams},
+	select_full_bridge,
+};
+use bp_runtime::BalanceOf;
 use codec::{Decode, Encode};
 use relay_substrate_client::Chain;
 use structopt::StructOpt;
+use strum::VariantNames;
 
 /// Estimate Delivery & Dispatch Fee command.
 #[derive(StructOpt, Debug, PartialEq, Eq)]
 pub struct EstimateFee {
 	/// A bridge instance to encode call for.
-	#[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)]
 	bridge: FullBridge,
 	#[structopt(flatten)]
 	source: SourceConnectionParams,
@@ -40,21 +43,21 @@ pub struct EstimateFee {
 impl EstimateFee {
 	/// Run the command.
 	pub async fn run(self) -> anyhow::Result<()> {
-		let Self {
-			source,
-			bridge,
-			lane,
-			payload,
-		} = self;
+		let Self { source, bridge, lane, payload } = self;
 
 		select_full_bridge!(bridge, {
 			let source_client = source.to_client::<Source>().await?;
 			let lane = lane.into();
-			let payload = Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?;
+			let payload =
+				Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?;
 
-			let fee: <Source as Chain>::Balance =
-				estimate_message_delivery_and_dispatch_fee(&source_client, ESTIMATE_MESSAGE_FEE_METHOD, lane, payload)
-					.await?;
+			let fee: BalanceOf<Source> = estimate_message_delivery_and_dispatch_fee(
+				&source_client,
+				ESTIMATE_MESSAGE_FEE_METHOD,
+				lane,
+				payload,
+			)
+			.await?;
 
 			log::info!(target: "bridge", "Fee: {:?}", Balance(fee as _));
 			println!("{}", fee);
@@ -72,10 +75,11 @@ pub(crate) async fn estimate_message_delivery_and_dispatch_fee<Fee: Decode, C: C
 	let encoded_response = client
 		.state_call(estimate_fee_method.into(), (lane, payload).encode().into(), None)
 		.await?;
-	let decoded_response: Option<Fee> =
-		Decode::decode(&mut &encoded_response.0[..]).map_err(relay_substrate_client::Error::ResponseParseFailed)?;
-	let fee = decoded_response
-		.ok_or_else(|| anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())))?;
+	let decoded_response: Option<Fee> = Decode::decode(&mut &encoded_response.0[..])
+		.map_err(relay_substrate_client::Error::ResponseParseFailed)?;
+	let fee = decoded_response.ok_or_else(|| {
+		anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec()))
+	})?;
 	Ok(fee)
 }
 
@@ -93,7 +97,7 @@ mod tests {
 		// when
 		let res = EstimateFee::from_iter(vec![
 			"estimate_fee",
-			"RialtoToMillau",
+			"rialto-to-millau",
 			"--source-port",
 			"1234",
 			"call",
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs
index 25f220d7f6cebc6bd51f67054c82b1486263d5e1..ffda0b1200884bac7924baddbb220af5c8e3ab72 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs
@@ -18,15 +18,16 @@ use crate::cli::{SourceConnectionParams, TargetConnectionParams, TargetSigningPa
 use bp_header_chain::InitializationData;
 use bp_runtime::Chain as ChainBase;
 use codec::Encode;
-use relay_substrate_client::{Chain, TransactionSignScheme};
+use relay_substrate_client::{Chain, TransactionSignScheme, UnsignedTransaction};
 use sp_core::{Bytes, Pair};
-use structopt::{clap::arg_enum, StructOpt};
+use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
 
 /// Initialize bridge pallet.
 #[derive(StructOpt)]
 pub struct InitBridge {
 	/// A bridge instance to initialize.
-	#[structopt(possible_values = &InitBridgeName::variants(), case_insensitive = true)]
+	#[structopt(possible_values = InitBridgeName::VARIANTS, case_insensitive = true)]
 	bridge: InitBridgeName,
 	#[structopt(flatten)]
 	source: SourceConnectionParams,
@@ -36,17 +37,17 @@ pub struct InitBridge {
 	target_sign: TargetSigningParams,
 }
 
-// TODO [#851] Use kebab-case.
-arg_enum! {
-	#[derive(Debug)]
-	/// Bridge to initialize.
-	pub enum InitBridgeName {
-		MillauToRialto,
-		RialtoToMillau,
-		WestendToMillau,
-		RococoToWococo,
-		WococoToRococo,
-	}
+#[derive(Debug, EnumString, EnumVariantNames)]
+#[strum(serialize_all = "kebab_case")]
+/// Bridge to initialize.
+pub enum InitBridgeName {
+	MillauToRialto,
+	RialtoToMillau,
+	WestendToMillau,
+	RococoToWococo,
+	WococoToRococo,
+	KusamaToPolkadot,
+	PolkadotToKusama,
 }
 
 macro_rules! select_bridge {
@@ -59,14 +60,17 @@ macro_rules! select_bridge {
 				fn encode_init_bridge(
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
-					rialto_runtime::SudoCall::sudo(Box::new(
-						rialto_runtime::BridgeGrandpaMillauCall::initialize(init_data).into(),
-					))
+					rialto_runtime::SudoCall::sudo {
+						call: Box::new(
+							rialto_runtime::BridgeGrandpaMillauCall::initialize { init_data }
+								.into(),
+						),
+					}
 					.into()
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::RialtoToMillau => {
 				type Source = relay_rialto_client::Rialto;
 				type Target = relay_millau_client::Millau;
@@ -74,15 +78,17 @@ macro_rules! select_bridge {
 				fn encode_init_bridge(
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
-					let initialize_call = millau_runtime::BridgeGrandpaRialtoCall::<
+					let initialize_call = millau_runtime::BridgeGrandpaCall::<
 						millau_runtime::Runtime,
 						millau_runtime::RialtoGrandpaInstance,
-					>::initialize(init_data);
-					millau_runtime::SudoCall::sudo(Box::new(initialize_call.into())).into()
+					>::initialize {
+						init_data,
+					};
+					millau_runtime::SudoCall::sudo { call: Box::new(initialize_call.into()) }.into()
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::WestendToMillau => {
 				type Source = relay_westend_client::Westend;
 				type Target = relay_millau_client::Millau;
@@ -90,18 +96,21 @@ macro_rules! select_bridge {
 				fn encode_init_bridge(
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
-					// at Westend -> Millau initialization we're not using sudo, because otherwise our deployments
-					// may fail, because we need to initialize both Rialto -> Millau and Westend -> Millau bridge.
-					// => since there's single possible sudo account, one of transaction may fail with duplicate nonce error
-					millau_runtime::BridgeGrandpaWestendCall::<
+					// at Westend -> Millau initialization we're not using sudo, because otherwise
+					// our deployments may fail, because we need to initialize both Rialto -> Millau
+					// and Westend -> Millau bridge. => since there's single possible sudo account,
+					// one of transaction may fail with duplicate nonce error
+					millau_runtime::BridgeGrandpaCall::<
 						millau_runtime::Runtime,
 						millau_runtime::WestendGrandpaInstance,
-					>::initialize(init_data)
+					>::initialize {
+						init_data,
+					}
 					.into()
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::RococoToWococo => {
 				type Source = relay_rococo_client::Rococo;
 				type Target = relay_wococo_client::Wococo;
@@ -110,12 +119,14 @@ macro_rules! select_bridge {
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
 					relay_wococo_client::runtime::Call::BridgeGrandpaRococo(
-						relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize(init_data),
+						relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize(
+							init_data,
+						),
 					)
 				}
 
 				$generic
-			}
+			},
 			InitBridgeName::WococoToRococo => {
 				type Source = relay_wococo_client::Wococo;
 				type Target = relay_rococo_client::Rococo;
@@ -124,12 +135,46 @@ macro_rules! select_bridge {
 					init_data: InitializationData<<Source as ChainBase>::Header>,
 				) -> <Target as Chain>::Call {
 					relay_rococo_client::runtime::Call::BridgeGrandpaWococo(
-						relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize(init_data),
+						relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize(
+							init_data,
+						),
+					)
+				}
+
+				$generic
+			},
+			InitBridgeName::KusamaToPolkadot => {
+				type Source = relay_kusama_client::Kusama;
+				type Target = relay_polkadot_client::Polkadot;
+
+				fn encode_init_bridge(
+					init_data: InitializationData<<Source as ChainBase>::Header>,
+				) -> <Target as Chain>::Call {
+					relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa(
+						relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::initialize(
+							init_data,
+						),
+					)
+				}
+
+				$generic
+			},
+			InitBridgeName::PolkadotToKusama => {
+				type Source = relay_polkadot_client::Polkadot;
+				type Target = relay_kusama_client::Kusama;
+
+				fn encode_init_bridge(
+					init_data: InitializationData<<Source as ChainBase>::Header>,
+				) -> <Target as Chain>::Call {
+					relay_kusama_client::runtime::Call::BridgePolkadotGrandpa(
+						relay_kusama_client::runtime::BridgePolkadotGrandpaCall::initialize(
+							init_data,
+						),
 					)
 				}
 
 				$generic
-			}
+			},
 		}
 	};
 }
@@ -142,7 +187,7 @@ impl InitBridge {
 			let target_client = self.target.to_client::<Target>().await?;
 			let target_sign = self.target_sign.to_keypair::<Target>()?;
 
-			crate::headers_initialize::initialize(
+			substrate_relay_helper::headers_initialize::initialize(
 				source_client,
 				target_client.clone(),
 				target_sign.public().into(),
@@ -151,8 +196,11 @@ impl InitBridge {
 						Target::sign_transaction(
 							*target_client.genesis_hash(),
 							&target_sign,
-							transaction_nonce,
-							encode_init_bridge(initialization_data),
+							relay_substrate_client::TransactionEra::immortal(),
+							UnsignedTransaction::new(
+								encode_init_bridge(initialization_data),
+								transaction_nonce,
+							),
 						)
 						.encode(),
 					)
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs
index 042ae320d502f0da1cc0376a7a1673afdde287ec..d98e8af0af084d297a4610fd2cbd72ca6593a5ff 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs
@@ -32,9 +32,12 @@ pub(crate) mod send_message;
 
 mod derive_account;
 mod init_bridge;
+mod register_parachain;
 mod relay_headers;
 mod relay_headers_and_messages;
 mod relay_messages;
+mod resubmit_transactions;
+mod swap_tokens;
 
 /// Parse relay CLI args.
 pub fn parse_args() -> Command {
@@ -84,8 +87,15 @@ pub enum Command {
 	EncodeMessage(encode_message::EncodeMessage),
 	/// Estimate Delivery and Dispatch Fee required for message submission to messages pallet.
 	EstimateFee(estimate_fee::EstimateFee),
-	/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain.
+	/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target
+	/// chain.
 	DeriveAccount(derive_account::DeriveAccount),
+	/// Resubmit transactions with increased tip if they are stalled.
+	ResubmitTransactions(resubmit_transactions::ResubmitTransactions),
+	/// Swap tokens using token-swap bridge.
+	SwapTokens(swap_tokens::SwapTokens),
+	/// Register parachain.
+	RegisterParachain(register_parachain::RegisterParachain),
 }
 
 impl Command {
@@ -94,12 +104,15 @@ impl Command {
 		use relay_utils::initialize::{initialize_logger, initialize_relay};
 
 		match self {
-			Self::RelayHeaders(_) | Self::RelayMessages(_) | Self::RelayHeadersAndMessages(_) | Self::InitBridge(_) => {
+			Self::RelayHeaders(_) |
+			Self::RelayMessages(_) |
+			Self::RelayHeadersAndMessages(_) |
+			Self::InitBridge(_) => {
 				initialize_relay();
-			}
+			},
 			_ => {
 				initialize_logger(false);
-			}
+			},
 		}
 	}
 
@@ -116,6 +129,9 @@ impl Command {
 			Self::EncodeMessage(arg) => arg.run().await?,
 			Self::EstimateFee(arg) => arg.run().await?,
 			Self::DeriveAccount(arg) => arg.run().await?,
+			Self::ResubmitTransactions(arg) => arg.run().await?,
+			Self::SwapTokens(arg) => arg.run().await?,
+			Self::RegisterParachain(arg) => arg.run().await?,
 		}
 		Ok(())
 	}
@@ -187,10 +203,7 @@ const SS58_FORMAT_PROOF: &str = "u16 -> Ss58Format is infallible; qed";
 impl AccountId {
 	/// Create new SS58-formatted address from raw account id.
 	pub fn from_raw<T: CliChain>(account: sp_runtime::AccountId32) -> Self {
-		Self {
-			account,
-			ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF),
-		}
+		Self { account, ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF) }
 	}
 
 	/// Enforces formatting account to be for given [`CliChain`] type.
@@ -228,7 +241,7 @@ pub trait CliChain: relay_substrate_client::Chain {
 	/// Chain's current version of the runtime.
 	const RUNTIME_VERSION: sp_version::RuntimeVersion;
 
-	/// Crypto keypair type used to send messages.
+	/// Crypto KeyPair type used to send messages.
 	///
 	/// In case of chains supporting multiple cryptos, pick one used by the CLI.
 	type KeyPair: sp_core::crypto::Pair;
@@ -242,7 +255,9 @@ pub trait CliChain: relay_substrate_client::Chain {
 	fn ss58_format() -> u16;
 
 	/// Construct message payload to be sent over the bridge.
-	fn encode_message(message: crate::cli::encode_message::MessagePayload) -> Result<Self::MessagePayload, String>;
+	fn encode_message(
+		message: crate::cli::encode_message::MessagePayload,
+	) -> anyhow::Result<Self::MessagePayload>;
 
 	/// Maximal extrinsic weight (from the runtime).
 	fn max_extrinsic_weight() -> Weight;
@@ -344,7 +359,7 @@ where
 
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
 		if s.to_lowercase() == "max" {
-			return Ok(ExplicitOrMaximal::Maximal);
+			return Ok(ExplicitOrMaximal::Maximal)
 		}
 
 		V::from_str(s)
@@ -360,7 +375,7 @@ macro_rules! declare_chain_options {
 	($chain:ident, $chain_prefix:ident) => {
 		paste::item! {
 			#[doc = $chain " connection params."]
-			#[derive(StructOpt, Debug, PartialEq, Eq)]
+			#[derive(StructOpt, Debug, PartialEq, Eq, Clone)]
 			pub struct [<$chain ConnectionParams>] {
 				#[doc = "Connect to " $chain " node at given host."]
 				#[structopt(long, default_value = "127.0.0.1")]
@@ -374,28 +389,117 @@ macro_rules! declare_chain_options {
 			}
 
 			#[doc = $chain " signing params."]
-			#[derive(StructOpt, Debug, PartialEq, Eq)]
+			#[derive(StructOpt, Debug, PartialEq, Eq, Clone)]
 			pub struct [<$chain SigningParams>] {
 				#[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."]
 				#[structopt(long)]
-				pub [<$chain_prefix _signer>]: String,
+				pub [<$chain_prefix _signer>]: Option<String>,
 				#[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."]
 				#[structopt(long)]
 				pub [<$chain_prefix _signer_password>]: Option<String>,
+
+				#[doc = "Path to the file that contains the SURI of the secret key to use when transactions are submitted to the " $chain " node. Can be overridden with " $chain_prefix "_signer option."]
+				#[structopt(long)]
+				pub [<$chain_prefix _signer_file>]: Option<std::path::PathBuf>,
+				#[doc = "Path to the file that contains the password for the SURI of the secret key to use when transactions are submitted to the " $chain " node. Can be overridden with " $chain_prefix "_signer_password option."]
+				#[structopt(long)]
+				pub [<$chain_prefix _signer_password_file>]: Option<std::path::PathBuf>,
+
+				#[doc = "Transactions mortality period, in blocks. MUST be a power of two in [4; 65536] range. MAY NOT be larger than `BlockHashCount` parameter of the chain system module."]
+				#[structopt(long)]
+				pub [<$chain_prefix _transactions_mortality>]: Option<u32>,
+			}
+
+			#[doc = "Parameters required to sign transaction on behalf of owner of the messages pallet at " $chain "."]
+			#[derive(StructOpt, Debug, PartialEq, Eq)]
+			pub struct [<$chain MessagesPalletOwnerSigningParams>] {
+				#[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."]
+				#[structopt(long)]
+				pub [<$chain_prefix _messages_pallet_owner>]: Option<String>,
+				#[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."]
+				#[structopt(long)]
+				pub [<$chain_prefix _messages_pallet_owner_password>]: Option<String>,
 			}
 
 			impl [<$chain SigningParams>] {
-				/// Parse signing params into chain-specific `KeyPair`.
+				/// Return transactions mortality.
+				#[allow(dead_code)]
+				pub fn transactions_mortality(&self) -> anyhow::Result<Option<u32>> {
+					self.[<$chain_prefix _transactions_mortality>]
+						.map(|transactions_mortality| {
+							if !(4..=65536).contains(&transactions_mortality)
+								|| !transactions_mortality.is_power_of_two()
+							{
+								Err(anyhow::format_err!(
+									"Transactions mortality {} is not a power of two in a [4; 65536] range",
+									transactions_mortality,
+								))
+							} else {
+								Ok(transactions_mortality)
+							}
+						})
+						.transpose()
+				}
+
+				/// Parse signing params into chain-specific KeyPair.
+				#[allow(dead_code)]
 				pub fn to_keypair<Chain: CliChain>(&self) -> anyhow::Result<Chain::KeyPair> {
+					let suri = match (self.[<$chain_prefix _signer>].as_ref(), self.[<$chain_prefix _signer_file>].as_ref()) {
+						(Some(suri), _) => suri.to_owned(),
+						(None, Some(suri_file)) => std::fs::read_to_string(suri_file)
+							.map_err(|err| anyhow::format_err!(
+								"Failed to read SURI from file {:?}: {}",
+								suri_file,
+								err,
+							))?,
+						(None, None) => return Err(anyhow::format_err!(
+							"One of options must be specified: '{}' or '{}'",
+							stringify!([<$chain_prefix _signer>]),
+							stringify!([<$chain_prefix _signer_file>]),
+						)),
+					};
+
+					let suri_password = match (
+						self.[<$chain_prefix _signer_password>].as_ref(),
+						self.[<$chain_prefix _signer_password_file>].as_ref(),
+					) {
+						(Some(suri_password), _) => Some(suri_password.to_owned()),
+						(None, Some(suri_password_file)) => std::fs::read_to_string(suri_password_file)
+							.map(Some)
+							.map_err(|err| anyhow::format_err!(
+								"Failed to read SURI password from file {:?}: {}",
+								suri_password_file,
+								err,
+							))?,
+						_ => None,
+					};
+
 					use sp_core::crypto::Pair;
 
 					Chain::KeyPair::from_string(
-						&self.[<$chain_prefix _signer>],
-						self.[<$chain_prefix _signer_password>].as_deref()
+						&suri,
+						suri_password.as_deref()
 					).map_err(|e| anyhow::format_err!("{:?}", e))
 				}
 			}
 
+			#[allow(dead_code)]
+			impl [<$chain MessagesPalletOwnerSigningParams>] {
+				/// Parse signing params into chain-specific KeyPair.
+				pub fn to_keypair<Chain: CliChain>(&self) -> anyhow::Result<Option<Chain::KeyPair>> {
+					use sp_core::crypto::Pair;
+
+					let [<$chain_prefix _messages_pallet_owner>] = match self.[<$chain_prefix _messages_pallet_owner>] {
+						Some(ref messages_pallet_owner) => messages_pallet_owner,
+						None => return Ok(None),
+					};
+					Chain::KeyPair::from_string(
+						[<$chain_prefix _messages_pallet_owner>],
+						self.[<$chain_prefix _messages_pallet_owner_password>].as_deref()
+					).map_err(|e| anyhow::format_err!("{:?}", e)).map(Some)
+				}
+			}
+
 			impl [<$chain ConnectionParams>] {
 				/// Convert connection params into Substrate client.
 				pub async fn to_client<Chain: CliChain>(
@@ -416,9 +520,12 @@ macro_rules! declare_chain_options {
 
 declare_chain_options!(Source, source);
 declare_chain_options!(Target, target);
+declare_chain_options!(Relaychain, relaychain);
+declare_chain_options!(Parachain, parachain);
 
 #[cfg(test)]
 mod tests {
+	use sp_core::Pair;
 	use std::str::FromStr;
 
 	use super::*;
@@ -434,10 +541,7 @@ mod tests {
 		let expected = vec![rialto1, rialto2, millau1, millau2];
 
 		// when
-		let parsed = expected
-			.iter()
-			.map(|s| AccountId::from_str(s).unwrap())
-			.collect::<Vec<_>>();
+		let parsed = expected.iter().map(|s| AccountId::from_str(s).unwrap()).collect::<Vec<_>>();
 
 		let actual = parsed.iter().map(|a| format!("{}", a)).collect::<Vec<_>>();
 
@@ -456,4 +560,93 @@ mod tests {
 		// then
 		assert_eq!(hex.0, hex2.0);
 	}
+
+	#[test]
+	fn reads_suri_from_file() {
+		const ALICE: &str = "//Alice";
+		const BOB: &str = "//Bob";
+		const ALICE_PASSWORD: &str = "alice_password";
+		const BOB_PASSWORD: &str = "bob_password";
+
+		let alice = sp_core::sr25519::Pair::from_string(ALICE, Some(ALICE_PASSWORD)).unwrap();
+		let bob = sp_core::sr25519::Pair::from_string(BOB, Some(BOB_PASSWORD)).unwrap();
+		let bob_with_alice_password =
+			sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap();
+
+		let temp_dir = tempfile::tempdir().unwrap();
+		let mut suri_file_path = temp_dir.path().to_path_buf();
+		let mut password_file_path = temp_dir.path().to_path_buf();
+		suri_file_path.push("suri");
+		password_file_path.push("password");
+		std::fs::write(&suri_file_path, BOB.as_bytes()).unwrap();
+		std::fs::write(&password_file_path, BOB_PASSWORD.as_bytes()).unwrap();
+
+		// when both seed and password are passed directly via cli options
+		assert_eq!(
+			TargetSigningParams {
+				target_signer: Some(ALICE.into()),
+				target_signer_password: Some(ALICE_PASSWORD.into()),
+
+				target_signer_file: None,
+				target_signer_password_file: None,
+
+				target_transactions_mortality: None,
+			}
+			.to_keypair::<relay_rialto_client::Rialto>()
+			.map(|p| p.public())
+			.map_err(drop),
+			Ok(alice.public()),
+		);
+
+		// when both seed and password are read from file
+		assert_eq!(
+			TargetSigningParams {
+				target_signer: None,
+				target_signer_password: None,
+
+				target_signer_file: Some(suri_file_path.clone()),
+				target_signer_password_file: Some(password_file_path.clone()),
+
+				target_transactions_mortality: None,
+			}
+			.to_keypair::<relay_rialto_client::Rialto>()
+			.map(|p| p.public())
+			.map_err(drop),
+			Ok(bob.public()),
+		);
+
+		// when the password is overridden by the cli option
+		assert_eq!(
+			TargetSigningParams {
+				target_signer: None,
+				target_signer_password: Some(ALICE_PASSWORD.into()),
+
+				target_signer_file: Some(suri_file_path.clone()),
+				target_signer_password_file: Some(password_file_path.clone()),
+
+				target_transactions_mortality: None,
+			}
+			.to_keypair::<relay_rialto_client::Rialto>()
+			.map(|p| p.public())
+			.map_err(drop),
+			Ok(bob_with_alice_password.public()),
+		);
+
+		// when both seed and password are overridden by cli options
+		assert_eq!(
+			TargetSigningParams {
+				target_signer: Some(ALICE.into()),
+				target_signer_password: Some(ALICE_PASSWORD.into()),
+
+				target_signer_file: Some(suri_file_path),
+				target_signer_password_file: Some(password_file_path),
+
+				target_transactions_mortality: None,
+			}
+			.to_keypair::<relay_rialto_client::Rialto>()
+			.map(|p| p.public())
+			.map_err(drop),
+			Ok(alice.public()),
+		);
+	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs b/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs
new file mode 100644
index 0000000000000000000000000000000000000000..fecc431148ebde0b338eef997666081b11bee708
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs
@@ -0,0 +1,346 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use crate::cli::{
+	swap_tokens::wait_until_transaction_is_finalized, Balance, ParachainConnectionParams,
+	RelaychainConnectionParams, RelaychainSigningParams,
+};
+
+use codec::Encode;
+use num_traits::Zero;
+use polkadot_parachain::primitives::{
+	HeadData as ParaHeadData, Id as ParaId, ValidationCode as ParaValidationCode,
+};
+use polkadot_runtime_common::{
+	paras_registrar::Call as ParaRegistrarCall, slots::Call as ParaSlotsCall,
+};
+use polkadot_runtime_parachains::paras::ParaLifecycle;
+use relay_substrate_client::{
+	AccountIdOf, CallOf, Chain, Client, TransactionSignScheme, UnsignedTransaction,
+};
+use rialto_runtime::SudoCall;
+use sp_core::{
+	storage::{well_known_keys::CODE, StorageKey},
+	Bytes, Pair,
+};
+use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
+
+/// Name of the `NextFreeParaId` value in the `polkadot_runtime_common::paras_registrar` pallet.
+const NEXT_FREE_PARA_ID_STORAGE_NAME: &str = "NextFreeParaId";
+/// Name of the `ParaLifecycles` map in the `polkadot_runtime_parachains::paras` pallet.
+const PARAS_LIFECYCLES_STORAGE_NAME: &str = "ParaLifecycles";
+
+/// Register parachain.
+#[derive(StructOpt, Debug, PartialEq)]
+pub struct RegisterParachain {
+	/// A parachain to register.
+	#[structopt(possible_values = Parachain::VARIANTS, case_insensitive = true)]
+	parachain: Parachain,
+	/// Parachain deposit.
+	#[structopt(long, default_value = "0")]
+	deposit: Balance,
+	/// Lease begin.
+	#[structopt(long, default_value = "0")]
+	lease_begin: u32,
+	/// Lease end.
+	#[structopt(long, default_value = "256")]
+	lease_end: u32,
+	#[structopt(flatten)]
+	relay_connection: RelaychainConnectionParams,
+	#[structopt(flatten)]
+	relay_sign: RelaychainSigningParams,
+	#[structopt(flatten)]
+	para_connection: ParachainConnectionParams,
+}
+
+/// Parachain to register.
+#[derive(Debug, EnumString, EnumVariantNames, PartialEq)]
+#[strum(serialize_all = "kebab_case")]
+pub enum Parachain {
+	RialtoParachain,
+}
+
+macro_rules! select_bridge {
+	($bridge: expr, $generic: tt) => {
+		match $bridge {
+			Parachain::RialtoParachain => {
+				type Relaychain = relay_rialto_client::Rialto;
+				type Parachain = relay_rialto_parachain_client::RialtoParachain;
+
+				use bp_rialto::{PARAS_PALLET_NAME, PARAS_REGISTRAR_PALLET_NAME};
+
+				$generic
+			},
+		}
+	};
+}
+
+impl RegisterParachain {
+	/// Run the command.
+	pub async fn run(self) -> anyhow::Result<()> {
+		select_bridge!(self.parachain, {
+			let relay_client = self.relay_connection.to_client::<Relaychain>().await?;
+			let relay_sign = self.relay_sign.to_keypair::<Relaychain>()?;
+			let para_client = self.para_connection.to_client::<Parachain>().await?;
+
+			// hopefully we're the only actor that is registering parachain right now
+			// => read next parachain id
+			let para_id_key = bp_runtime::storage_value_final_key(
+				PARAS_REGISTRAR_PALLET_NAME.as_bytes(),
+				NEXT_FREE_PARA_ID_STORAGE_NAME.as_bytes(),
+			);
+			let para_id: ParaId = relay_client
+				.storage_value(StorageKey(para_id_key.to_vec()), None)
+				.await?
+				.unwrap_or(polkadot_primitives::v1::LOWEST_PUBLIC_ID)
+				.max(polkadot_primitives::v1::LOWEST_PUBLIC_ID);
+			log::info!(target: "bridge", "Going to reserve parachain id: {:?}", para_id);
+
+			// step 1: reserve a parachain id
+			let relay_genesis_hash = *relay_client.genesis_hash();
+			let relay_sudo_account: AccountIdOf<Relaychain> = relay_sign.public().into();
+			let reserve_parachain_id_call: CallOf<Relaychain> =
+				ParaRegistrarCall::reserve {}.into();
+			let reserve_parachain_signer = relay_sign.clone();
+			wait_until_transaction_is_finalized::<Relaychain>(
+				relay_client
+					.submit_and_watch_signed_extrinsic(
+						relay_sudo_account.clone(),
+						move |_, transaction_nonce| {
+							Bytes(
+								Relaychain::sign_transaction(
+									relay_genesis_hash,
+									&reserve_parachain_signer,
+									relay_substrate_client::TransactionEra::immortal(),
+									UnsignedTransaction::new(
+										reserve_parachain_id_call,
+										transaction_nonce,
+									),
+								)
+								.encode(),
+							)
+						},
+					)
+					.await?,
+			)
+			.await?;
+			log::info!(target: "bridge", "Reserved parachain id: {:?}", para_id);
+
+			// step 2: register parathread
+			let para_genesis_header = para_client.header_by_number(Zero::zero()).await?;
+			let para_code = para_client
+				.raw_storage_value(StorageKey(CODE.to_vec()), Some(para_genesis_header.hash()))
+				.await?
+				.ok_or_else(|| {
+					anyhow::format_err!("Cannot fetch validation code of {}", Parachain::NAME)
+				})?
+				.0;
+			log::info!(
+				target: "bridge",
+				"Going to register parachain {:?}: genesis len = {} code len = {}",
+				para_id,
+				para_genesis_header.encode().len(),
+				para_code.len(),
+			);
+			let register_parathread_call: CallOf<Relaychain> = ParaRegistrarCall::register {
+				id: para_id,
+				genesis_head: ParaHeadData(para_genesis_header.encode()),
+				validation_code: ParaValidationCode(para_code),
+			}
+			.into();
+			let register_parathread_signer = relay_sign.clone();
+			wait_until_transaction_is_finalized::<Relaychain>(
+				relay_client
+					.submit_and_watch_signed_extrinsic(
+						relay_sudo_account.clone(),
+						move |_, transaction_nonce| {
+							Bytes(
+								Relaychain::sign_transaction(
+									relay_genesis_hash,
+									&register_parathread_signer,
+									relay_substrate_client::TransactionEra::immortal(),
+									UnsignedTransaction::new(
+										register_parathread_call,
+										transaction_nonce,
+									),
+								)
+								.encode(),
+							)
+						},
+					)
+					.await?,
+			)
+			.await?;
+			log::info!(target: "bridge", "Registered parachain: {:?}. Waiting for onboarding", para_id);
+
+			// wait until parathread is onboarded
+			let para_state_key = bp_runtime::storage_map_final_key_twox64_concat(
+				PARAS_PALLET_NAME,
+				PARAS_LIFECYCLES_STORAGE_NAME,
+				&para_id.encode(),
+			);
+			wait_para_state(
+				&relay_client,
+				&para_state_key.0,
+				&[ParaLifecycle::Onboarding, ParaLifecycle::Parathread],
+				ParaLifecycle::Parathread,
+			)
+			.await?;
+
+			// step 3: force parachain leases
+			let lease_begin = self.lease_begin;
+			let lease_end = self.lease_end;
+			let para_deposit = self.deposit.cast().into();
+			log::info!(
+				target: "bridge",
+				"Going to force leases of parachain {:?}: [{}; {}]",
+				para_id,
+				lease_begin,
+				lease_end,
+			);
+			let force_lease_call: CallOf<Relaychain> = SudoCall::sudo {
+				call: Box::new(
+					ParaSlotsCall::force_lease {
+						para: para_id,
+						leaser: relay_sudo_account.clone(),
+						amount: para_deposit,
+						period_begin: lease_begin,
+						period_count: lease_end.saturating_sub(lease_begin).saturating_add(1),
+					}
+					.into(),
+				),
+			}
+			.into();
+			let force_lease_signer = relay_sign.clone();
+			relay_client
+				.submit_signed_extrinsic(relay_sudo_account.clone(), move |_, transaction_nonce| {
+					Bytes(
+						Relaychain::sign_transaction(
+							relay_genesis_hash,
+							&force_lease_signer,
+							relay_substrate_client::TransactionEra::immortal(),
+							UnsignedTransaction::new(force_lease_call, transaction_nonce),
+						)
+						.encode(),
+					)
+				})
+				.await?;
+			log::info!(target: "bridge", "Registered parachain leases: {:?}. Waiting for onboarding", para_id);
+
+			// wait until parachain is onboarded
+			wait_para_state(
+				&relay_client,
+				&para_state_key.0,
+				&[
+					ParaLifecycle::Onboarding,
+					ParaLifecycle::UpgradingParathread,
+					ParaLifecycle::Parathread,
+				],
+				ParaLifecycle::Parachain,
+			)
+			.await?;
+
+			Ok(())
+		})
+	}
+}
+
+/// Wait until parachain state is changed.
+async fn wait_para_state<Relaychain: Chain>(
+	relay_client: &Client<Relaychain>,
+	para_state_key: &[u8],
+	from_states: &[ParaLifecycle],
+	to_state: ParaLifecycle,
+) -> anyhow::Result<()> {
+	loop {
+		let para_state: ParaLifecycle = relay_client
+			.storage_value(StorageKey(para_state_key.to_vec()), None)
+			.await?
+			.ok_or_else(|| {
+				anyhow::format_err!(
+					"Cannot fetch next free parachain lifecycle from the runtime storage of {}",
+					Relaychain::NAME,
+				)
+			})?;
+		if para_state == to_state {
+			log::info!(target: "bridge", "Parachain state is now: {:?}", to_state);
+			return Ok(())
+		}
+		if !from_states.contains(&para_state) {
+			return Err(anyhow::format_err!("Invalid parachain lifecycle: {:?}", para_state))
+		}
+
+		log::info!(target: "bridge", "Parachain state: {:?}. Waiting for {:?}", para_state, to_state);
+		async_std::task::sleep(Relaychain::AVERAGE_BLOCK_INTERVAL).await;
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn register_rialto_parachain() {
+		let register_parachain = RegisterParachain::from_iter(vec![
+			"register-parachain",
+			"rialto-parachain",
+			"--parachain-host",
+			"127.0.0.1",
+			"--parachain-port",
+			"11949",
+			"--relaychain-host",
+			"127.0.0.1",
+			"--relaychain-port",
+			"9944",
+			"--relaychain-signer",
+			"//Alice",
+			"--deposit",
+			"42",
+			"--lease-begin",
+			"100",
+			"--lease-end",
+			"200",
+		]);
+
+		assert_eq!(
+			register_parachain,
+			RegisterParachain {
+				parachain: Parachain::RialtoParachain,
+				deposit: Balance(42),
+				lease_begin: 100,
+				lease_end: 200,
+				relay_connection: RelaychainConnectionParams {
+					relaychain_host: "127.0.0.1".into(),
+					relaychain_port: 9944,
+					relaychain_secure: false,
+				},
+				relay_sign: RelaychainSigningParams {
+					relaychain_signer: Some("//Alice".into()),
+					relaychain_signer_password: None,
+					relaychain_signer_file: None,
+					relaychain_signer_password_file: None,
+					relaychain_transactions_mortality: None,
+				},
+				para_connection: ParachainConnectionParams {
+					parachain_host: "127.0.0.1".into(),
+					parachain_port: 11949,
+					parachain_secure: false,
+				},
+			}
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs
index ec521c2918d85ae078b3b3e928dec7e624d73b4a..e90c663bb33a0a8ec2117ef4b3857765088477ce 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs
@@ -14,17 +14,23 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::{PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams};
-use crate::finality_pipeline::SubstrateFinalitySyncPipeline;
-use structopt::{clap::arg_enum, StructOpt};
+use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
+
+use substrate_relay_helper::finality_pipeline::SubstrateFinalitySyncPipeline;
+
+use crate::cli::{
+	PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams,
+};
 
 /// Start headers relayer process.
 #[derive(StructOpt)]
 pub struct RelayHeaders {
 	/// A bridge instance to relay headers for.
-	#[structopt(possible_values = &RelayHeadersBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = RelayHeadersBridge::VARIANTS, case_insensitive = true)]
 	bridge: RelayHeadersBridge,
-	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) are relayed.
+	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set)
+	/// are relayed.
 	#[structopt(long)]
 	only_mandatory_headers: bool,
 	#[structopt(flatten)]
@@ -37,17 +43,17 @@ pub struct RelayHeaders {
 	prometheus_params: PrometheusParams,
 }
 
-// TODO [#851] Use kebab-case.
-arg_enum! {
-	#[derive(Debug)]
-	/// Headers relay bridge.
-	pub enum RelayHeadersBridge {
-		MillauToRialto,
-		RialtoToMillau,
-		WestendToMillau,
-		RococoToWococo,
-		WococoToRococo,
-	}
+#[derive(Debug, EnumString, EnumVariantNames)]
+#[strum(serialize_all = "kebab_case")]
+/// Headers relay bridge.
+pub enum RelayHeadersBridge {
+	MillauToRialto,
+	RialtoToMillau,
+	WestendToMillau,
+	RococoToWococo,
+	WococoToRococo,
+	KusamaToPolkadot,
+	PolkadotToKusama,
 }
 
 macro_rules! select_bridge {
@@ -59,35 +65,49 @@ macro_rules! select_bridge {
 				type Finality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::RialtoToMillau => {
 				type Source = relay_rialto_client::Rialto;
 				type Target = relay_millau_client::Millau;
 				type Finality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::WestendToMillau => {
 				type Source = relay_westend_client::Westend;
 				type Target = relay_millau_client::Millau;
 				type Finality = crate::chains::westend_headers_to_millau::WestendFinalityToMillau;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::RococoToWococo => {
 				type Source = relay_rococo_client::Rococo;
 				type Target = relay_wococo_client::Wococo;
 				type Finality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo;
 
 				$generic
-			}
+			},
 			RelayHeadersBridge::WococoToRococo => {
 				type Source = relay_wococo_client::Wococo;
 				type Target = relay_rococo_client::Rococo;
 				type Finality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo;
 
 				$generic
-			}
+			},
+			RelayHeadersBridge::KusamaToPolkadot => {
+				type Source = relay_kusama_client::Kusama;
+				type Target = relay_polkadot_client::Polkadot;
+				type Finality = crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot;
+
+				$generic
+			},
+			RelayHeadersBridge::PolkadotToKusama => {
+				type Source = relay_polkadot_client::Polkadot;
+				type Target = relay_kusama_client::Kusama;
+				type Finality = crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama;
+
+				$generic
+			},
 		}
 	};
 }
@@ -98,16 +118,18 @@ impl RelayHeaders {
 		select_bridge!(self.bridge, {
 			let source_client = self.source.to_client::<Source>().await?;
 			let target_client = self.target.to_client::<Target>().await?;
+			let target_transactions_mortality = self.target_sign.target_transactions_mortality;
 			let target_sign = self.target_sign.to_keypair::<Target>()?;
 			let metrics_params = Finality::customize_metrics(self.prometheus_params.into())?;
 			let finality = Finality::new(target_client.clone(), target_sign);
 			finality.start_relay_guards();
 
-			crate::finality_pipeline::run(
+			substrate_relay_helper::finality_pipeline::run(
 				finality,
 				source_client,
 				target_client,
 				self.only_mandatory_headers,
+				target_transactions_mortality,
 				metrics_params,
 			)
 			.await
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs
index e71ea6aeaa2f8aeef17353abea7f02f62b2e59a2..076331112a21f0cc04a478f94efd17ab407da152 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs
@@ -22,20 +22,41 @@
 //! 2) add `declare_bridge_options!(...)` for the bridge;
 //! 3) add bridge support to the `select_bridge! { ... }` macro.
 
-use crate::cli::{CliChain, HexLaneId, PrometheusParams};
-use crate::declare_chain_options;
-use crate::messages_lane::MessagesRelayParams;
-use crate::on_demand_headers::OnDemandHeadersRelay;
-
 use futures::{FutureExt, TryFutureExt};
-use relay_utils::metrics::MetricsParams;
 use structopt::StructOpt;
+use strum::VariantNames;
+
+use codec::Encode;
+use messages_relay::relay_strategy::MixStrategy;
+use relay_substrate_client::{
+	AccountIdOf, Chain, Client, TransactionSignScheme, UnsignedTransaction,
+};
+use relay_utils::metrics::MetricsParams;
+use sp_core::{Bytes, Pair};
+use substrate_relay_helper::{
+	messages_lane::{MessagesRelayParams, SubstrateMessageLane},
+	on_demand_headers::OnDemandHeadersRelay,
+};
+
+use crate::{
+	cli::{relay_messages::RelayerMode, CliChain, HexLaneId, PrometheusParams},
+	declare_chain_options,
+};
+
+/// Maximal allowed conversion rate error ratio (abs(real - stored) / stored) that we allow.
+///
+/// If it is zero, then transaction will be submitted every time we see difference between
+/// stored and real conversion rates. If it is large enough (e.g. > than 10 percents, which is 0.1),
+/// then rational relayers may stop relaying messages because they were submitted using
+/// lesser conversion rate.
+const CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO: f64 = 0.05;
 
 /// Start headers+messages relayer process.
 #[derive(StructOpt)]
 pub enum RelayHeadersAndMessages {
 	MillauRialto(MillauRialtoHeadersAndMessages),
 	RococoWococo(RococoWococoHeadersAndMessages),
+	KusamaPolkadot(KusamaPolkadotHeadersAndMessages),
 }
 
 /// Parameters that have the same names across all bridges.
@@ -44,13 +65,22 @@ pub struct HeadersAndMessagesSharedParams {
 	/// Hex-encoded lane identifiers that should be served by the complex relay.
 	#[structopt(long, default_value = "00000000")]
 	lane: Vec<HexLaneId>,
+	#[structopt(long, possible_values = RelayerMode::VARIANTS, case_insensitive = true, default_value = "rational")]
+	relayer_mode: RelayerMode,
+	/// Create relayers fund accounts on both chains, if it does not exists yet.
+	#[structopt(long)]
+	create_relayers_fund_accounts: bool,
+	/// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set)
+	/// are relayed.
+	#[structopt(long)]
+	only_mandatory_headers: bool,
 	#[structopt(flatten)]
 	prometheus_params: PrometheusParams,
 }
 
-// The reason behind this macro is that 'normal' relays are using source and target chains terminology,
-// which is unusable for both-way relays (if you're relaying headers from Rialto to Millau and from
-// Millau to Rialto, then which chain is source?).
+// The reason behind this macro is that 'normal' relays are using source and target chains
+// terminology, which is unusable for both-way relays (if you're relaying headers from Rialto to
+// Millau and from Millau to Rialto, then which chain is source?).
 macro_rules! declare_bridge_options {
 	($chain1:ident, $chain2:ident) => {
 		paste::item! {
@@ -64,12 +94,15 @@ macro_rules! declare_bridge_options {
 				#[structopt(flatten)]
 				left_sign: [<$chain1 SigningParams>],
 				#[structopt(flatten)]
+				left_messages_pallet_owner: [<$chain1 MessagesPalletOwnerSigningParams>],
+				#[structopt(flatten)]
 				right: [<$chain2 ConnectionParams>],
 				#[structopt(flatten)]
 				right_sign: [<$chain2 SigningParams>],
+				#[structopt(flatten)]
+				right_messages_pallet_owner: [<$chain2 MessagesPalletOwnerSigningParams>],
 			}
 
-			#[allow(unreachable_patterns)]
 			impl From<RelayHeadersAndMessages> for [<$chain1 $chain2 HeadersAndMessages>] {
 				fn from(relay_params: RelayHeadersAndMessages) -> [<$chain1 $chain2 HeadersAndMessages>] {
 					match relay_params {
@@ -91,40 +124,223 @@ macro_rules! select_bridge {
 				type Left = relay_millau_client::Millau;
 				type Right = relay_rialto_client::Rialto;
 
-				type LeftToRightFinality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto;
-				type RightToLeftFinality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau;
-
-				type LeftToRightMessages = crate::chains::millau_messages_to_rialto::MillauMessagesToRialto;
-				type RightToLeftMessages = crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau;
-
-				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_millau::BlockNumber = bp_millau::SESSION_LENGTH;
-				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_rialto::BlockNumber = bp_rialto::SESSION_LENGTH;
+				type LeftToRightFinality =
+					crate::chains::millau_headers_to_rialto::MillauFinalityToRialto;
+				type RightToLeftFinality =
+					crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau;
+
+				type LeftToRightMessages =
+					crate::chains::millau_messages_to_rialto::MillauMessagesToRialto;
+				type RightToLeftMessages =
+					crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau;
+
+				type LeftAccountIdConverter = bp_millau::AccountIdConverter;
+				type RightAccountIdConverter = bp_rialto::AccountIdConverter;
+
+				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_millau::BlockNumber =
+					bp_millau::SESSION_LENGTH;
+				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_rialto::BlockNumber =
+					bp_rialto::SESSION_LENGTH;
+
+				use crate::chains::{
+					millau_messages_to_rialto::{
+						add_standalone_metrics as add_left_to_right_standalone_metrics,
+						run as left_to_right_messages,
+						update_rialto_to_millau_conversion_rate as update_right_to_left_conversion_rate,
+					},
+					rialto_messages_to_millau::{
+						add_standalone_metrics as add_right_to_left_standalone_metrics,
+						run as right_to_left_messages,
+						update_millau_to_rialto_conversion_rate as update_left_to_right_conversion_rate,
+					},
+				};
+
+				async fn left_create_account(
+					_left_client: Client<Left>,
+					_left_sign: <Left as TransactionSignScheme>::AccountKeyPair,
+					_account_id: AccountIdOf<Left>,
+				) -> anyhow::Result<()> {
+					Err(anyhow::format_err!("Account creation is not supported by this bridge"))
+				}
 
-				use crate::chains::millau_messages_to_rialto::run as left_to_right_messages;
-				use crate::chains::rialto_messages_to_millau::run as right_to_left_messages;
+				async fn right_create_account(
+					_right_client: Client<Right>,
+					_right_sign: <Right as TransactionSignScheme>::AccountKeyPair,
+					_account_id: AccountIdOf<Right>,
+				) -> anyhow::Result<()> {
+					Err(anyhow::format_err!("Account creation is not supported by this bridge"))
+				}
 
 				$generic
-			}
+			},
 			RelayHeadersAndMessages::RococoWococo(_) => {
 				type Params = RococoWococoHeadersAndMessages;
 
 				type Left = relay_rococo_client::Rococo;
 				type Right = relay_wococo_client::Wococo;
 
-				type LeftToRightFinality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo;
-				type RightToLeftFinality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo;
+				type LeftToRightFinality =
+					crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo;
+				type RightToLeftFinality =
+					crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo;
+
+				type LeftToRightMessages =
+					crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo;
+				type RightToLeftMessages =
+					crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo;
+
+				type LeftAccountIdConverter = bp_rococo::AccountIdConverter;
+				type RightAccountIdConverter = bp_wococo::AccountIdConverter;
+
+				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber =
+					bp_rococo::SESSION_LENGTH;
+				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber =
+					bp_wococo::SESSION_LENGTH;
+
+				use crate::chains::{
+					rococo_messages_to_wococo::{
+						add_standalone_metrics as add_left_to_right_standalone_metrics,
+						run as left_to_right_messages,
+					},
+					wococo_messages_to_rococo::{
+						add_standalone_metrics as add_right_to_left_standalone_metrics,
+						run as right_to_left_messages,
+					},
+				};
+
+				async fn update_right_to_left_conversion_rate(
+					_client: Client<Left>,
+					_signer: <Left as TransactionSignScheme>::AccountKeyPair,
+					_updated_rate: f64,
+				) -> anyhow::Result<()> {
+					Err(anyhow::format_err!("Conversion rate is not supported by this bridge"))
+				}
 
-				type LeftToRightMessages = crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo;
-				type RightToLeftMessages = crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo;
+				async fn update_left_to_right_conversion_rate(
+					_client: Client<Right>,
+					_signer: <Right as TransactionSignScheme>::AccountKeyPair,
+					_updated_rate: f64,
+				) -> anyhow::Result<()> {
+					Err(anyhow::format_err!("Conversion rate is not supported by this bridge"))
+				}
 
-				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_rococo::BlockNumber = bp_rococo::SESSION_LENGTH;
-				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_wococo::BlockNumber = bp_wococo::SESSION_LENGTH;
+				async fn left_create_account(
+					_left_client: Client<Left>,
+					_left_sign: <Left as TransactionSignScheme>::AccountKeyPair,
+					_account_id: AccountIdOf<Left>,
+				) -> anyhow::Result<()> {
+					Err(anyhow::format_err!("Account creation is not supported by this bridge"))
+				}
 
-				use crate::chains::rococo_messages_to_wococo::run as left_to_right_messages;
-				use crate::chains::wococo_messages_to_rococo::run as right_to_left_messages;
+				async fn right_create_account(
+					_right_client: Client<Right>,
+					_right_sign: <Right as TransactionSignScheme>::AccountKeyPair,
+					_account_id: AccountIdOf<Right>,
+				) -> anyhow::Result<()> {
+					Err(anyhow::format_err!("Account creation is not supported by this bridge"))
+				}
 
 				$generic
-			}
+			},
+			RelayHeadersAndMessages::KusamaPolkadot(_) => {
+				type Params = KusamaPolkadotHeadersAndMessages;
+
+				type Left = relay_kusama_client::Kusama;
+				type Right = relay_polkadot_client::Polkadot;
+
+				type LeftToRightFinality =
+					crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot;
+				type RightToLeftFinality =
+					crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama;
+
+				type LeftToRightMessages =
+					crate::chains::kusama_messages_to_polkadot::KusamaMessagesToPolkadot;
+				type RightToLeftMessages =
+					crate::chains::polkadot_messages_to_kusama::PolkadotMessagesToKusama;
+
+				type LeftAccountIdConverter = bp_kusama::AccountIdConverter;
+				type RightAccountIdConverter = bp_polkadot::AccountIdConverter;
+
+				const MAX_MISSING_LEFT_HEADERS_AT_RIGHT: bp_kusama::BlockNumber =
+					bp_kusama::SESSION_LENGTH;
+				const MAX_MISSING_RIGHT_HEADERS_AT_LEFT: bp_polkadot::BlockNumber =
+					bp_polkadot::SESSION_LENGTH;
+
+				use crate::chains::{
+					kusama_messages_to_polkadot::{
+						add_standalone_metrics as add_left_to_right_standalone_metrics,
+						run as left_to_right_messages,
+						update_polkadot_to_kusama_conversion_rate as update_right_to_left_conversion_rate,
+					},
+					polkadot_messages_to_kusama::{
+						add_standalone_metrics as add_right_to_left_standalone_metrics,
+						run as right_to_left_messages,
+						update_kusama_to_polkadot_conversion_rate as update_left_to_right_conversion_rate,
+					},
+				};
+
+				async fn left_create_account(
+					left_client: Client<Left>,
+					left_sign: <Left as TransactionSignScheme>::AccountKeyPair,
+					account_id: AccountIdOf<Left>,
+				) -> anyhow::Result<()> {
+					let left_genesis_hash = *left_client.genesis_hash();
+					left_client
+						.submit_signed_extrinsic(
+							left_sign.public().into(),
+							move |_, transaction_nonce| {
+								Bytes(
+									Left::sign_transaction(left_genesis_hash, &left_sign, relay_substrate_client::TransactionEra::immortal(),
+										UnsignedTransaction::new(
+											relay_kusama_client::runtime::Call::Balances(
+												relay_kusama_client::runtime::BalancesCall::transfer(
+													bp_kusama::AccountAddress::Id(account_id),
+													bp_kusama::EXISTENTIAL_DEPOSIT.into(),
+												),
+											),
+											transaction_nonce,
+										),
+									).encode()
+								)
+							},
+						)
+						.await
+						.map(drop)
+						.map_err(|e| anyhow::format_err!("{}", e))
+				}
+
+				async fn right_create_account(
+					right_client: Client<Right>,
+					right_sign: <Right as TransactionSignScheme>::AccountKeyPair,
+					account_id: AccountIdOf<Right>,
+				) -> anyhow::Result<()> {
+					let right_genesis_hash = *right_client.genesis_hash();
+					right_client
+						.submit_signed_extrinsic(
+							right_sign.public().into(),
+							move |_, transaction_nonce| {
+								Bytes(
+									Right::sign_transaction(right_genesis_hash, &right_sign, relay_substrate_client::TransactionEra::immortal(),
+										UnsignedTransaction::new(
+											relay_polkadot_client::runtime::Call::Balances(
+												relay_polkadot_client::runtime::BalancesCall::transfer(
+													bp_polkadot::AccountAddress::Id(account_id),
+													bp_polkadot::EXISTENTIAL_DEPOSIT.into(),
+												),
+											),
+											transaction_nonce,
+										),
+									).encode()
+								)
+							},
+						)
+						.await
+						.map(drop)
+						.map_err(|e| anyhow::format_err!("{}", e))
+				}
+
+				$generic
+			},
 		}
 	};
 }
@@ -134,9 +350,12 @@ declare_chain_options!(Millau, millau);
 declare_chain_options!(Rialto, rialto);
 declare_chain_options!(Rococo, rococo);
 declare_chain_options!(Wococo, wococo);
+declare_chain_options!(Kusama, kusama);
+declare_chain_options!(Polkadot, polkadot);
 // All supported bridges.
 declare_bridge_options!(Millau, Rialto);
 declare_bridge_options!(Rococo, Wococo);
+declare_bridge_options!(Kusama, Polkadot);
 
 impl RelayHeadersAndMessages {
 	/// Run the command.
@@ -145,26 +364,147 @@ impl RelayHeadersAndMessages {
 			let params: Params = self.into();
 
 			let left_client = params.left.to_client::<Left>().await?;
+			let left_transactions_mortality = params.left_sign.transactions_mortality()?;
 			let left_sign = params.left_sign.to_keypair::<Left>()?;
+			let left_messages_pallet_owner =
+				params.left_messages_pallet_owner.to_keypair::<Left>()?;
 			let right_client = params.right.to_client::<Right>().await?;
+			let right_transactions_mortality = params.right_sign.transactions_mortality()?;
 			let right_sign = params.right_sign.to_keypair::<Right>()?;
+			let right_messages_pallet_owner =
+				params.right_messages_pallet_owner.to_keypair::<Right>()?;
 
 			let lanes = params.shared.lane;
+			let relayer_mode = params.shared.relayer_mode.into();
+			let relay_strategy = MixStrategy::new(relayer_mode);
+
+			const METRIC_IS_SOME_PROOF: &str =
+				"it is `None` when metric has been already registered; \
+				this is the command entrypoint, so nothing has been registered yet; \
+				qed";
 
 			let metrics_params: MetricsParams = params.shared.prometheus_params.into();
 			let metrics_params = relay_utils::relay_metrics(None, metrics_params).into_params();
+			let (metrics_params, left_to_right_metrics) =
+				add_left_to_right_standalone_metrics(None, metrics_params, left_client.clone())?;
+			let (metrics_params, right_to_left_metrics) =
+				add_right_to_left_standalone_metrics(None, metrics_params, right_client.clone())?;
+			if let Some(left_messages_pallet_owner) = left_messages_pallet_owner {
+				let left_client = left_client.clone();
+				substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop(
+					left_to_right_metrics
+						.target_to_source_conversion_rate
+						.expect(METRIC_IS_SOME_PROOF),
+					left_to_right_metrics
+						.target_to_base_conversion_rate
+						.clone()
+						.expect(METRIC_IS_SOME_PROOF),
+					left_to_right_metrics
+						.source_to_base_conversion_rate
+						.clone()
+						.expect(METRIC_IS_SOME_PROOF),
+					CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO,
+					move |new_rate| {
+						log::info!(
+							target: "bridge",
+							"Going to update {} -> {} (on {}) conversion rate to {}.",
+							Right::NAME,
+							Left::NAME,
+							Left::NAME,
+							new_rate,
+						);
+						update_right_to_left_conversion_rate(
+							left_client.clone(),
+							left_messages_pallet_owner.clone(),
+							new_rate,
+						)
+					},
+				);
+			}
+			if let Some(right_messages_pallet_owner) = right_messages_pallet_owner {
+				let right_client = right_client.clone();
+				substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop(
+					right_to_left_metrics
+						.target_to_source_conversion_rate
+						.expect(METRIC_IS_SOME_PROOF),
+					left_to_right_metrics
+						.source_to_base_conversion_rate
+						.expect(METRIC_IS_SOME_PROOF),
+					left_to_right_metrics
+						.target_to_base_conversion_rate
+						.expect(METRIC_IS_SOME_PROOF),
+					CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO,
+					move |new_rate| {
+						log::info!(
+							target: "bridge",
+							"Going to update {} -> {} (on {}) conversion rate to {}.",
+							Left::NAME,
+							Right::NAME,
+							Right::NAME,
+							new_rate,
+						);
+						update_left_to_right_conversion_rate(
+							right_client.clone(),
+							right_messages_pallet_owner.clone(),
+							new_rate,
+						)
+					},
+				);
+			}
+
+			if params.shared.create_relayers_fund_accounts {
+				let relayer_fund_account_id = pallet_bridge_messages::relayer_fund_account_id::<
+					AccountIdOf<Left>,
+					LeftAccountIdConverter,
+				>();
+				let relayers_fund_account_balance =
+					left_client.free_native_balance(relayer_fund_account_id.clone()).await;
+				if let Err(relay_substrate_client::Error::AccountDoesNotExist) =
+					relayers_fund_account_balance
+				{
+					log::info!(target: "bridge", "Going to create relayers fund account at {}.", Left::NAME);
+					left_create_account(
+						left_client.clone(),
+						left_sign.clone(),
+						relayer_fund_account_id,
+					)
+					.await?;
+				}
+
+				let relayer_fund_account_id = pallet_bridge_messages::relayer_fund_account_id::<
+					AccountIdOf<Right>,
+					RightAccountIdConverter,
+				>();
+				let relayers_fund_account_balance =
+					right_client.free_native_balance(relayer_fund_account_id.clone()).await;
+				if let Err(relay_substrate_client::Error::AccountDoesNotExist) =
+					relayers_fund_account_balance
+				{
+					log::info!(target: "bridge", "Going to create relayers fund account at {}.", Right::NAME);
+					right_create_account(
+						right_client.clone(),
+						right_sign.clone(),
+						relayer_fund_account_id,
+					)
+					.await?;
+				}
+			}
 
 			let left_to_right_on_demand_headers = OnDemandHeadersRelay::new(
 				left_client.clone(),
 				right_client.clone(),
+				right_transactions_mortality,
 				LeftToRightFinality::new(right_client.clone(), right_sign.clone()),
 				MAX_MISSING_LEFT_HEADERS_AT_RIGHT,
+				params.shared.only_mandatory_headers,
 			);
 			let right_to_left_on_demand_headers = OnDemandHeadersRelay::new(
 				right_client.clone(),
 				left_client.clone(),
+				left_transactions_mortality,
 				RightToLeftFinality::new(left_client.clone(), left_sign.clone()),
 				MAX_MISSING_RIGHT_HEADERS_AT_LEFT,
+				params.shared.only_mandatory_headers,
 			);
 
 			// Need 2x capacity since we consider both directions for each lane
@@ -174,28 +514,38 @@ impl RelayHeadersAndMessages {
 				let left_to_right_messages = left_to_right_messages(MessagesRelayParams {
 					source_client: left_client.clone(),
 					source_sign: left_sign.clone(),
+					source_transactions_mortality: left_transactions_mortality,
 					target_client: right_client.clone(),
 					target_sign: right_sign.clone(),
+					target_transactions_mortality: right_transactions_mortality,
 					source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()),
 					target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()),
 					lane_id: lane,
 					metrics_params: metrics_params.clone().disable().metrics_prefix(
-						messages_relay::message_lane_loop::metrics_prefix::<LeftToRightMessages>(&lane),
+						messages_relay::message_lane_loop::metrics_prefix::<
+							<LeftToRightMessages as SubstrateMessageLane>::MessageLane,
+						>(&lane),
 					),
+					relay_strategy: relay_strategy.clone(),
 				})
 				.map_err(|e| anyhow::format_err!("{}", e))
 				.boxed();
 				let right_to_left_messages = right_to_left_messages(MessagesRelayParams {
 					source_client: right_client.clone(),
 					source_sign: right_sign.clone(),
+					source_transactions_mortality: right_transactions_mortality,
 					target_client: left_client.clone(),
 					target_sign: left_sign.clone(),
+					target_transactions_mortality: left_transactions_mortality,
 					source_to_target_headers_relay: Some(right_to_left_on_demand_headers.clone()),
 					target_to_source_headers_relay: Some(left_to_right_on_demand_headers.clone()),
 					lane_id: lane,
 					metrics_params: metrics_params.clone().disable().metrics_prefix(
-						messages_relay::message_lane_loop::metrics_prefix::<RightToLeftMessages>(&lane),
+						messages_relay::message_lane_loop::metrics_prefix::<
+							<RightToLeftMessages as SubstrateMessageLane>::MessageLane,
+						>(&lane),
 					),
+					relay_strategy: relay_strategy.clone(),
 				})
 				.map_err(|e| anyhow::format_err!("{}", e))
 				.boxed();
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs
index 94630886ca38e984ebd172373be7b74ffa8eb194..4b2e0c975602a1012d50ff19f6edfa036c6b14c8 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs
@@ -14,25 +14,51 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::{
-	HexLaneId, PrometheusParams, SourceConnectionParams, SourceSigningParams, TargetConnectionParams,
-	TargetSigningParams,
+use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
+
+use messages_relay::relay_strategy::MixStrategy;
+use substrate_relay_helper::messages_lane::MessagesRelayParams;
+
+use crate::{
+	cli::{
+		bridge::FullBridge, HexLaneId, PrometheusParams, SourceConnectionParams,
+		SourceSigningParams, TargetConnectionParams, TargetSigningParams,
+	},
+	select_full_bridge,
 };
-use crate::messages_lane::MessagesRelayParams;
-use crate::select_full_bridge;
 
-use structopt::StructOpt;
+/// Relayer operating mode.
+#[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq)]
+#[strum(serialize_all = "kebab_case")]
+pub enum RelayerMode {
+	/// The relayer doesn't care about rewards.
+	Altruistic,
+	/// The relayer will deliver all messages and confirmations as long as he's not losing any
+	/// funds.
+	Rational,
+}
+
+impl From<RelayerMode> for messages_relay::message_lane_loop::RelayerMode {
+	fn from(mode: RelayerMode) -> Self {
+		match mode {
+			RelayerMode::Altruistic => Self::Altruistic,
+			RelayerMode::Rational => Self::Rational,
+		}
+	}
+}
 
 /// Start messages relayer process.
 #[derive(StructOpt)]
 pub struct RelayMessages {
 	/// A bridge instance to relay messages for.
-	#[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)]
 	bridge: FullBridge,
 	/// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`.
 	#[structopt(long, default_value = "00000000")]
 	lane: HexLaneId,
+	#[structopt(long, possible_values = RelayerMode::VARIANTS, case_insensitive = true, default_value = "rational")]
+	relayer_mode: RelayerMode,
 	#[structopt(flatten)]
 	source: SourceConnectionParams,
 	#[structopt(flatten)]
@@ -51,21 +77,68 @@ impl RelayMessages {
 		select_full_bridge!(self.bridge, {
 			let source_client = self.source.to_client::<Source>().await?;
 			let source_sign = self.source_sign.to_keypair::<Source>()?;
+			let source_transactions_mortality = self.source_sign.transactions_mortality()?;
 			let target_client = self.target.to_client::<Target>().await?;
 			let target_sign = self.target_sign.to_keypair::<Target>()?;
+			let target_transactions_mortality = self.target_sign.transactions_mortality()?;
+			let relayer_mode = self.relayer_mode.into();
+			let relay_strategy = MixStrategy::new(relayer_mode);
 
 			relay_messages(MessagesRelayParams {
 				source_client,
 				source_sign,
+				source_transactions_mortality,
 				target_client,
 				target_sign,
+				target_transactions_mortality,
 				source_to_target_headers_relay: None,
 				target_to_source_headers_relay: None,
 				lane_id: self.lane.into(),
 				metrics_params: self.prometheus_params.into(),
+				relay_strategy,
 			})
 			.await
 			.map_err(|e| anyhow::format_err!("{}", e))
 		})
 	}
 }
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn should_use_rational_relayer_mode_by_default() {
+		assert_eq!(
+			RelayMessages::from_iter(vec![
+				"relay-messages",
+				"rialto-to-millau",
+				"--source-port=0",
+				"--source-signer=//Alice",
+				"--target-port=0",
+				"--target-signer=//Alice",
+				"--lane=00000000",
+			])
+			.relayer_mode,
+			RelayerMode::Rational,
+		);
+	}
+
+	#[test]
+	fn should_accept_altruistic_relayer_mode() {
+		assert_eq!(
+			RelayMessages::from_iter(vec![
+				"relay-messages",
+				"rialto-to-millau",
+				"--source-port=0",
+				"--source-signer=//Alice",
+				"--target-port=0",
+				"--target-signer=//Alice",
+				"--lane=00000000",
+				"--relayer-mode=altruistic",
+			])
+			.relayer_mode,
+			RelayerMode::Altruistic,
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs b/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs
new file mode 100644
index 0000000000000000000000000000000000000000..64663d7e8ec026032cdb67c26fbf96508cdf3b39
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs
@@ -0,0 +1,559 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use crate::cli::{Balance, TargetConnectionParams, TargetSigningParams};
+
+use codec::{Decode, Encode};
+use num_traits::{One, Zero};
+use relay_substrate_client::{
+	BlockWithJustification, Chain, Client, Error as SubstrateError, HeaderOf, TransactionSignScheme,
+};
+use relay_utils::FailedClient;
+use sp_core::Bytes;
+use sp_runtime::{
+	traits::{Hash, Header as HeaderT},
+	transaction_validity::TransactionPriority,
+};
+use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
+
+/// Start resubmit transactions process.
+#[derive(StructOpt)]
+pub struct ResubmitTransactions {
+	/// A bridge instance to relay headers for.
+	#[structopt(possible_values = RelayChain::VARIANTS, case_insensitive = true)]
+	chain: RelayChain,
+	#[structopt(flatten)]
+	target: TargetConnectionParams,
+	#[structopt(flatten)]
+	target_sign: TargetSigningParams,
+	/// Number of blocks we see before considering queued transaction as stalled.
+	#[structopt(long, default_value = "5")]
+	stalled_blocks: u32,
+	/// Tip limit. We'll never submit transaction with larger tip.
+	#[structopt(long)]
+	tip_limit: Balance,
+	/// Tip increase step. We'll be checking updated transaction priority by increasing its tip by
+	/// this step.
+	#[structopt(long)]
+	tip_step: Balance,
+	/// Priority selection strategy.
+	#[structopt(subcommand)]
+	strategy: PrioritySelectionStrategy,
+}
+
+/// Chain, which transactions we're going to track && resubmit.
+#[derive(Debug, EnumString, EnumVariantNames)]
+#[strum(serialize_all = "kebab_case")]
+pub enum RelayChain {
+	Millau,
+	Kusama,
+	Polkadot,
+}
+
+/// Strategy to use for priority selection.
+#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy)]
+pub enum PrioritySelectionStrategy {
+	/// Strategy selects tip that changes transaction priority to be better than priority of
+	/// the first transaction of previous block.
+	///
+	/// It only makes sense to use this strategy for Millau transactions. Millau has transactions
+	/// that are close to block limits, so if there are any other queued transactions, 'large'
+	/// transaction won't fit the block && will be postponed. To avoid this, we change its priority
+	/// to some large value, making it best transaction => it'll be 'mined' first.
+	MakeItBestTransaction,
+	/// Strategy selects tip that changes transaction priority to be better than priority of
+	/// selected queued transaction.
+	///
+	/// When we first see stalled transaction, we make it better than worst 1/4 of queued
+	/// transactions. If it is still stalled, we'll make it better than 1/3 of queued transactions,
+	/// ...
+	MakeItBetterThanQueuedTransaction,
+}
+
+macro_rules! select_bridge {
+	($bridge: expr, $generic: tt) => {
+		match $bridge {
+			RelayChain::Millau => {
+				type Target = relay_millau_client::Millau;
+				type TargetSign = relay_millau_client::Millau;
+
+				$generic
+			},
+			RelayChain::Kusama => {
+				type Target = relay_kusama_client::Kusama;
+				type TargetSign = relay_kusama_client::Kusama;
+
+				$generic
+			},
+			RelayChain::Polkadot => {
+				type Target = relay_polkadot_client::Polkadot;
+				type TargetSign = relay_polkadot_client::Polkadot;
+
+				$generic
+			},
+		}
+	};
+}
+
+impl ResubmitTransactions {
+	/// Run the command.
+	pub async fn run(self) -> anyhow::Result<()> {
+		select_bridge!(self.chain, {
+			let relay_loop_name = format!("ResubmitTransactions{}", Target::NAME);
+			let client = self.target.to_client::<Target>().await?;
+			let key_pair = self.target_sign.to_keypair::<Target>()?;
+
+			relay_utils::relay_loop((), client)
+				.run(relay_loop_name, move |_, client, _| {
+					run_until_connection_lost::<Target, TargetSign>(
+						client,
+						key_pair.clone(),
+						Context {
+							strategy: self.strategy,
+							best_header: HeaderOf::<Target>::new(
+								Default::default(),
+								Default::default(),
+								Default::default(),
+								Default::default(),
+								Default::default(),
+							),
+							transaction: None,
+							resubmitted: 0,
+							stalled_for: Zero::zero(),
+							stalled_for_limit: self.stalled_blocks as _,
+							tip_step: self.tip_step.cast() as _,
+							tip_limit: self.tip_limit.cast() as _,
+						},
+					)
+				})
+				.await
+				.map_err(Into::into)
+		})
+	}
+}
+
+impl PrioritySelectionStrategy {
+	/// Select target priority.
+	async fn select_target_priority<C: Chain, S: TransactionSignScheme<Chain = C>>(
+		&self,
+		client: &Client<C>,
+		context: &Context<C>,
+	) -> Result<Option<TransactionPriority>, SubstrateError> {
+		match *self {
+			PrioritySelectionStrategy::MakeItBestTransaction =>
+				read_previous_block_best_priority::<C, S>(client, context).await,
+			PrioritySelectionStrategy::MakeItBetterThanQueuedTransaction =>
+				select_priority_from_queue::<C, S>(client, context).await,
+		}
+	}
+}
+
+#[derive(Debug)]
+struct Context<C: Chain> {
+	/// Priority selection strategy.
+	strategy: PrioritySelectionStrategy,
+	/// Best known block header.
+	best_header: C::Header,
+	/// Hash of the (potentially) stalled transaction.
+	transaction: Option<C::Hash>,
+	/// How many times we have resubmitted this `transaction`?
+	resubmitted: u32,
+	/// This transaction is in pool for `stalled_for` wakeup intervals.
+	stalled_for: C::BlockNumber,
+	/// When `stalled_for` reaching this limit, transaction is considered stalled.
+	stalled_for_limit: C::BlockNumber,
+	/// Tip step interval.
+	tip_step: C::Balance,
+	/// Maximal tip.
+	tip_limit: C::Balance,
+}
+
+impl<C: Chain> Context<C> {
+	/// Return true if transaction has stalled.
+	fn is_stalled(&self) -> bool {
+		self.stalled_for >= self.stalled_for_limit
+	}
+
+	/// Notice resubmitted transaction.
+	fn notice_resubmitted_transaction(mut self, transaction: C::Hash) -> Self {
+		self.transaction = Some(transaction);
+		self.stalled_for = Zero::zero();
+		self.resubmitted += 1;
+		self
+	}
+
+	/// Notice transaction from the transaction pool.
+	fn notice_transaction(mut self, transaction: C::Hash) -> Self {
+		if self.transaction == Some(transaction) {
+			self.stalled_for += One::one();
+		} else {
+			self.transaction = Some(transaction);
+			self.stalled_for = One::one();
+			self.resubmitted = 0;
+		}
+		self
+	}
+}
+
+/// Run resubmit transactions loop.
+async fn run_until_connection_lost<C: Chain, S: TransactionSignScheme<Chain = C>>(
+	client: Client<C>,
+	key_pair: S::AccountKeyPair,
+	mut context: Context<C>,
+) -> Result<(), FailedClient> {
+	loop {
+		async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await;
+
+		let result = run_loop_iteration::<C, S>(client.clone(), key_pair.clone(), context).await;
+		context = match result {
+			Ok(context) => context,
+			Err(error) => {
+				log::error!(
+					target: "bridge",
+					"Resubmit {} transactions loop has failed with error: {:?}",
+					C::NAME,
+					error,
+				);
+				return Err(FailedClient::Target)
+			},
+		};
+	}
+}
+
+/// Run single loop iteration.
+async fn run_loop_iteration<C: Chain, S: TransactionSignScheme<Chain = C>>(
+	client: Client<C>,
+	key_pair: S::AccountKeyPair,
+	mut context: Context<C>,
+) -> Result<Context<C>, SubstrateError> {
+	// correct best header is required for all other actions
+	context.best_header = client.best_header().await?;
+
+	// check if there's queued transaction, signed by given author
+	let original_transaction = match lookup_signer_transaction::<C, S>(&client, &key_pair).await? {
+		Some(original_transaction) => original_transaction,
+		None => {
+			log::trace!(target: "bridge", "No {} transactions from required signer in the txpool", C::NAME);
+			return Ok(context)
+		},
+	};
+	let original_transaction_hash = C::Hasher::hash(&original_transaction.encode());
+	let context = context.notice_transaction(original_transaction_hash);
+
+	// if transaction hasn't been mined for `stalled_blocks`, we'll need to resubmit it
+	if !context.is_stalled() {
+		log::trace!(
+			target: "bridge",
+			"{} transaction {:?} is not yet stalled ({:?}/{:?})",
+			C::NAME,
+			context.transaction,
+			context.stalled_for,
+			context.stalled_for_limit,
+		);
+		return Ok(context)
+	}
+
+	// select priority for updated transaction
+	let target_priority =
+		match context.strategy.select_target_priority::<C, S>(&client, &context).await? {
+			Some(target_priority) => target_priority,
+			None => {
+				log::trace!(target: "bridge", "Failed to select target priority");
+				return Ok(context)
+			},
+		};
+
+	// update transaction tip
+	let (is_updated, updated_transaction) = update_transaction_tip::<C, S>(
+		&client,
+		&key_pair,
+		context.best_header.hash(),
+		original_transaction,
+		context.tip_step,
+		context.tip_limit,
+		target_priority,
+	)
+	.await?;
+
+	if !is_updated {
+		log::trace!(target: "bridge", "{} transaction tip can not be updated. Reached limit?", C::NAME);
+		return Ok(context)
+	}
+
+	let updated_transaction = updated_transaction.encode();
+	let updated_transaction_hash = C::Hasher::hash(&updated_transaction);
+	client.submit_unsigned_extrinsic(Bytes(updated_transaction)).await?;
+
+	log::info!(
+		target: "bridge",
+		"Replaced {} transaction {} with {} in txpool",
+		C::NAME,
+		original_transaction_hash,
+		updated_transaction_hash,
+	);
+
+	Ok(context.notice_resubmitted_transaction(updated_transaction_hash))
+}
+
+/// Search transaction pool for transaction, signed by given key pair.
+async fn lookup_signer_transaction<C: Chain, S: TransactionSignScheme<Chain = C>>(
+	client: &Client<C>,
+	key_pair: &S::AccountKeyPair,
+) -> Result<Option<S::SignedTransaction>, SubstrateError> {
+	let pending_transactions = client.pending_extrinsics().await?;
+	for pending_transaction in pending_transactions {
+		let pending_transaction = S::SignedTransaction::decode(&mut &pending_transaction.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
+		if !S::is_signed_by(key_pair, &pending_transaction) {
+			continue
+		}
+
+		return Ok(Some(pending_transaction))
+	}
+
+	Ok(None)
+}
+
+/// Read priority of best signed transaction of previous block.
+async fn read_previous_block_best_priority<C: Chain, S: TransactionSignScheme<Chain = C>>(
+	client: &Client<C>,
+	context: &Context<C>,
+) -> Result<Option<TransactionPriority>, SubstrateError> {
+	let best_block = client.get_block(Some(context.best_header.hash())).await?;
+	let best_transaction = best_block
+		.extrinsics()
+		.iter()
+		.filter_map(|xt| S::SignedTransaction::decode(&mut &xt[..]).ok())
+		.find(|xt| S::is_signed(xt));
+	match best_transaction {
+		Some(best_transaction) => Ok(Some(
+			client
+				.validate_transaction(*context.best_header.parent_hash(), best_transaction)
+				.await??
+				.priority,
+		)),
+		None => Ok(None),
+	}
+}
+
+/// Select priority of some queued transaction.
+async fn select_priority_from_queue<C: Chain, S: TransactionSignScheme<Chain = C>>(
+	client: &Client<C>,
+	context: &Context<C>,
+) -> Result<Option<TransactionPriority>, SubstrateError> {
+	// select transaction from the queue
+	let queued_transactions = client.pending_extrinsics().await?;
+	let selected_transaction = match select_transaction_from_queue(queued_transactions, context) {
+		Some(selected_transaction) => selected_transaction,
+		None => return Ok(None),
+	};
+
+	let selected_transaction = S::SignedTransaction::decode(&mut &selected_transaction[..])
+		.map_err(SubstrateError::ResponseParseFailed)?;
+	let target_priority = client
+		.validate_transaction(context.best_header.hash(), selected_transaction)
+		.await??
+		.priority;
+	Ok(Some(target_priority))
+}
+
+/// Select transaction with target priority from the vec of queued transactions.
+fn select_transaction_from_queue<C: Chain>(
+	mut queued_transactions: Vec<Bytes>,
+	context: &Context<C>,
+) -> Option<Bytes> {
+	if queued_transactions.is_empty() {
+		return None
+	}
+
+	// the more times we resubmit transaction (`context.resubmitted`), the closer we move
+	// to the front of the transaction queue
+	let total_transactions = queued_transactions.len();
+	let resubmitted_factor = context.resubmitted;
+	let divisor =
+		1usize.saturating_add(1usize.checked_shl(resubmitted_factor).unwrap_or(usize::MAX));
+	let transactions_to_skip = total_transactions / divisor;
+
+	Some(
+		queued_transactions
+			.swap_remove(std::cmp::min(total_transactions - 1, transactions_to_skip)),
+	)
+}
+
+/// Try to find appropriate tip for transaction so that its priority is larger than given.
+async fn update_transaction_tip<C: Chain, S: TransactionSignScheme<Chain = C>>(
+	client: &Client<C>,
+	key_pair: &S::AccountKeyPair,
+	at_block: C::Hash,
+	tx: S::SignedTransaction,
+	tip_step: C::Balance,
+	tip_limit: C::Balance,
+	target_priority: TransactionPriority,
+) -> Result<(bool, S::SignedTransaction), SubstrateError> {
+	let stx = format!("{:?}", tx);
+	let mut current_priority = client.validate_transaction(at_block, tx.clone()).await??.priority;
+	let mut unsigned_tx = S::parse_transaction(tx).ok_or_else(|| {
+		SubstrateError::Custom(format!("Failed to parse {} transaction {}", C::NAME, stx,))
+	})?;
+	let old_tip = unsigned_tx.tip;
+
+	while current_priority < target_priority {
+		let next_tip = unsigned_tx.tip + tip_step;
+		if next_tip > tip_limit {
+			break
+		}
+
+		log::trace!(
+			target: "bridge",
+			"{} transaction priority with tip={:?}: {}. Target priority: {}",
+			C::NAME,
+			unsigned_tx.tip,
+			current_priority,
+			target_priority,
+		);
+
+		unsigned_tx.tip = next_tip;
+		current_priority = client
+			.validate_transaction(
+				at_block,
+				S::sign_transaction(
+					*client.genesis_hash(),
+					key_pair,
+					relay_substrate_client::TransactionEra::immortal(),
+					unsigned_tx.clone(),
+				),
+			)
+			.await??
+			.priority;
+	}
+
+	log::debug!(
+		target: "bridge",
+		"{} transaction tip has changed from {:?} to {:?}",
+		C::NAME,
+		old_tip,
+		unsigned_tx.tip,
+	);
+
+	Ok((
+		old_tip != unsigned_tx.tip,
+		S::sign_transaction(
+			*client.genesis_hash(),
+			key_pair,
+			relay_substrate_client::TransactionEra::immortal(),
+			unsigned_tx,
+		),
+	))
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use bp_rialto::Hash;
+	use relay_rialto_client::Rialto;
+
+	fn context() -> Context<Rialto> {
+		Context {
+			strategy: PrioritySelectionStrategy::MakeItBestTransaction,
+			best_header: HeaderOf::<Rialto>::new(
+				Default::default(),
+				Default::default(),
+				Default::default(),
+				Default::default(),
+				Default::default(),
+			),
+			transaction: None,
+			resubmitted: 0,
+			stalled_for: Zero::zero(),
+			stalled_for_limit: 3,
+			tip_step: 100,
+			tip_limit: 1000,
+		}
+	}
+
+	#[test]
+	fn context_works() {
+		let mut context = context();
+
+		// when transaction is noticed 2/3 times, it isn't stalled
+		context = context.notice_transaction(Default::default());
+		assert!(!context.is_stalled());
+		assert_eq!(context.stalled_for, 1);
+		assert_eq!(context.resubmitted, 0);
+		context = context.notice_transaction(Default::default());
+		assert!(!context.is_stalled());
+		assert_eq!(context.stalled_for, 2);
+		assert_eq!(context.resubmitted, 0);
+
+		// when transaction is noticed for 3rd time in a row, it is considered stalled
+		context = context.notice_transaction(Default::default());
+		assert!(context.is_stalled());
+		assert_eq!(context.stalled_for, 3);
+		assert_eq!(context.resubmitted, 0);
+
+		// and after we resubmit it, we forget previous transaction
+		context = context.notice_resubmitted_transaction(Hash::from([1; 32]));
+		assert_eq!(context.transaction, Some(Hash::from([1; 32])));
+		assert_eq!(context.resubmitted, 1);
+		assert_eq!(context.stalled_for, 0);
+	}
+
+	#[test]
+	fn select_transaction_from_queue_works_with_empty_queue() {
+		assert_eq!(select_transaction_from_queue(vec![], &context()), None);
+	}
+
+	#[test]
+	fn select_transaction_from_queue_works() {
+		let mut context = context();
+		let queued_transactions = vec![
+			Bytes(vec![1]),
+			Bytes(vec![2]),
+			Bytes(vec![3]),
+			Bytes(vec![4]),
+			Bytes(vec![5]),
+			Bytes(vec![6]),
+		];
+
+		// when we resubmit tx for the first time, 1/2 of queue is skipped
+		assert_eq!(
+			select_transaction_from_queue(queued_transactions.clone(), &context),
+			Some(Bytes(vec![4])),
+		);
+
+		// when we resubmit tx for the second time, 1/3 of queue is skipped
+		context = context.notice_resubmitted_transaction(Hash::from([1; 32]));
+		assert_eq!(
+			select_transaction_from_queue(queued_transactions.clone(), &context),
+			Some(Bytes(vec![3])),
+		);
+
+		// when we resubmit tx for the third time, 1/5 of queue is skipped
+		context = context.notice_resubmitted_transaction(Hash::from([2; 32]));
+		assert_eq!(
+			select_transaction_from_queue(queued_transactions.clone(), &context),
+			Some(Bytes(vec![2])),
+		);
+
+		// when we resubmit tx for the second time, 1/9 of queue is skipped
+		context = context.notice_resubmitted_transaction(Hash::from([3; 32]));
+		assert_eq!(
+			select_transaction_from_queue(queued_transactions, &context),
+			Some(Bytes(vec![1])),
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs
index f710f814e41d981756555a9f305cdf6a25a48bf6..3e77ad8342927bdfc5a16f85b72098d22ec246a4 100644
--- a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs
@@ -14,46 +14,71 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::cli::bridge::FullBridge;
-use crate::cli::encode_call::{self, CliEncodeCall};
-use crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee;
 use crate::cli::{
-	Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, SourceSigningParams,
-	TargetSigningParams,
+	bridge::FullBridge,
+	encode_call::{self, CliEncodeCall},
+	estimate_fee::estimate_message_delivery_and_dispatch_fee,
+	Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams,
+	SourceSigningParams, TargetSigningParams,
 };
 use bp_message_dispatch::{CallOrigin, MessagePayload};
-use bp_runtime::messages::DispatchFeePayment;
+use bp_runtime::BalanceOf;
 use codec::Encode;
 use frame_support::weights::Weight;
-use relay_substrate_client::{Chain, TransactionSignScheme};
+use relay_substrate_client::{Chain, TransactionSignScheme, UnsignedTransaction};
 use sp_core::{Bytes, Pair};
 use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner};
 use std::fmt::Debug;
 use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
+
+/// Relayer operating mode.
+#[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq, Eq)]
+#[strum(serialize_all = "kebab_case")]
+pub enum DispatchFeePayment {
+	/// The dispatch fee is paid at the source chain.
+	AtSourceChain,
+	/// The dispatch fee is paid at the target chain.
+	AtTargetChain,
+}
+
+impl From<DispatchFeePayment> for bp_runtime::messages::DispatchFeePayment {
+	fn from(dispatch_fee_payment: DispatchFeePayment) -> Self {
+		match dispatch_fee_payment {
+			DispatchFeePayment::AtSourceChain => Self::AtSourceChain,
+			DispatchFeePayment::AtTargetChain => Self::AtTargetChain,
+		}
+	}
+}
 
 /// Send bridge message.
 #[derive(StructOpt)]
 pub struct SendMessage {
 	/// A bridge instance to encode call for.
-	#[structopt(possible_values = &FullBridge::variants(), case_insensitive = true)]
+	#[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)]
 	bridge: FullBridge,
 	#[structopt(flatten)]
 	source: SourceConnectionParams,
 	#[structopt(flatten)]
 	source_sign: SourceSigningParams,
-	/// The SURI of secret key to use when transactions are submitted to the Target node.
-	#[structopt(long, required_if("origin", "Target"))]
-	target_signer: Option<String>,
-	/// The password for the SURI of secret key to use when transactions are submitted to the Target node.
-	#[structopt(long)]
-	target_signer_password: Option<String>,
+	#[structopt(flatten)]
+	target_sign: TargetSigningParams,
 	/// Hex-encoded lane id. Defaults to `00000000`.
 	#[structopt(long, default_value = "00000000")]
 	lane: HexLaneId,
+	/// Where dispatch fee is paid?
+	#[structopt(
+		long,
+		possible_values = DispatchFeePayment::VARIANTS,
+		case_insensitive = true,
+		default_value = "at-source-chain",
+	)]
+	dispatch_fee_payment: DispatchFeePayment,
 	/// Dispatch weight of the message. If not passed, determined automatically.
 	#[structopt(long)]
 	dispatch_weight: Option<ExplicitOrMaximal<Weight>>,
-	/// Delivery and dispatch fee in source chain base currency units. If not passed, determined automatically.
+	/// Delivery and dispatch fee in source chain base currency units. If not passed, determined
+	/// automatically.
 	#[structopt(long)]
 	fee: Option<Balance>,
 	/// Message type.
@@ -72,9 +97,9 @@ impl SendMessage {
 		crate::select_full_bridge!(self.bridge, {
 			let SendMessage {
 				source_sign,
-				target_signer,
-				target_signer_password,
+				target_sign,
 				ref mut message,
+				dispatch_fee_payment,
 				dispatch_weight,
 				origin,
 				bridge,
@@ -101,12 +126,6 @@ impl SendMessage {
 					match origin {
 						Origins::Source => CallOrigin::SourceAccount(source_account_id),
 						Origins::Target => {
-							let target_sign = TargetSigningParams {
-								target_signer: target_signer.clone().ok_or_else(|| {
-									anyhow::format_err!("The argument target_signer is not available")
-								})?,
-								target_signer_password: target_signer_password.clone(),
-							};
 							let target_sign = target_sign.to_keypair::<Target>()?;
 							let digest = account_ownership_digest(
 								&target_call,
@@ -120,9 +139,10 @@ impl SendMessage {
 								target_origin_public.into(),
 								digest_signature.into(),
 							)
-						}
+						},
 					},
 					&target_call,
+					*dispatch_fee_payment,
 				)
 			};
 			Ok(payload)
@@ -141,7 +161,7 @@ impl SendMessage {
 			let fee = match self.fee {
 				Some(fee) => fee,
 				None => Balance(
-					estimate_message_delivery_and_dispatch_fee::<<Source as Chain>::Balance, _, _>(
+					estimate_message_delivery_and_dispatch_fee::<BalanceOf<Source>, _, _>(
 						&source_client,
 						ESTIMATE_MESSAGE_FEE_METHOD,
 						lane,
@@ -158,24 +178,46 @@ impl SendMessage {
 				fee,
 			})?;
 
+			let source_genesis_hash = *source_client.genesis_hash();
+			let estimated_transaction_fee = source_client
+				.estimate_extrinsic_fee(Bytes(
+					Source::sign_transaction(
+						source_genesis_hash,
+						&source_sign,
+						relay_substrate_client::TransactionEra::immortal(),
+						UnsignedTransaction::new(send_message_call.clone(), 0),
+					)
+					.encode(),
+				))
+				.await?;
 			source_client
-				.submit_signed_extrinsic(source_sign.public().into(), |transaction_nonce| {
+				.submit_signed_extrinsic(source_sign.public().into(), move |_, transaction_nonce| {
 					let signed_source_call = Source::sign_transaction(
-						*source_client.genesis_hash(),
+						source_genesis_hash,
 						&source_sign,
-						transaction_nonce,
-						send_message_call,
+						relay_substrate_client::TransactionEra::immortal(),
+						UnsignedTransaction::new(send_message_call, transaction_nonce),
 					)
 					.encode();
 
 					log::info!(
 						target: "bridge",
-						"Sending message to {}. Size: {}. Dispatch weight: {}. Fee: {}",
+						"Sending message to {}. Lane: {:?}. Size: {}. Dispatch weight: {}. Fee: {}",
 						Target::NAME,
+						lane,
 						signed_source_call.len(),
 						dispatch_weight,
 						fee,
 					);
+					log::info!(
+						target: "bridge",
+						"The source account ({:?}) balance will be reduced by (at most) {} (message fee) + {} (tx fee	) = {} {} tokens",
+						AccountId32::from(source_sign.public()),
+						fee.0,
+						estimated_transaction_fee.inclusion_fee(),
+						fee.0.saturating_add(estimated_transaction_fee.inclusion_fee() as _),
+						Source::NAME,
+					);
 					log::info!(
 						target: "bridge",
 						"Signed {} Call: {:?}",
@@ -197,10 +239,7 @@ fn prepare_call_dispatch_weight(
 	weight_from_pre_dispatch_call: ExplicitOrMaximal<Weight>,
 	maximal_allowed_weight: Weight,
 ) -> Weight {
-	match user_specified_dispatch_weight
-		.clone()
-		.unwrap_or(weight_from_pre_dispatch_call)
-	{
+	match user_specified_dispatch_weight.clone().unwrap_or(weight_from_pre_dispatch_call) {
 		ExplicitOrMaximal::Explicit(weight) => weight,
 		ExplicitOrMaximal::Maximal => maximal_allowed_weight,
 	}
@@ -211,6 +250,7 @@ pub(crate) fn message_payload<SAccountId, TPublic, TSignature>(
 	weight: Weight,
 	origin: CallOrigin<SAccountId, TPublic, TSignature>,
 	call: &impl Encode,
+	dispatch_fee_payment: DispatchFeePayment,
 ) -> MessagePayload<SAccountId, TPublic, TSignature, Vec<u8>>
 where
 	SAccountId: Encode + Debug,
@@ -222,7 +262,7 @@ where
 		spec_version,
 		weight,
 		origin,
-		dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
+		dispatch_fee_payment: dispatch_fee_payment.into(),
 		call: HexBytes::encode(call),
 	};
 
@@ -230,24 +270,14 @@ where
 	log::info!(target: "bridge", "Encoded Message Payload: {:?}", HexBytes::encode(&payload));
 
 	// re-pack to return `Vec<u8>`
-	let MessagePayload {
-		spec_version,
-		weight,
-		origin,
-		dispatch_fee_payment,
-		call,
-	} = payload;
-	MessagePayload {
-		spec_version,
-		weight,
-		origin,
-		dispatch_fee_payment,
-		call: call.0,
-	}
+	let MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call } = payload;
+	MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call: call.0 }
 }
 
 pub(crate) fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight {
-	bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight)
+	bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight(
+		maximal_extrinsic_weight,
+	)
 }
 
 #[cfg(test)]
@@ -260,7 +290,7 @@ mod tests {
 		// given
 		let mut send_message = SendMessage::from_iter(vec![
 			"send-message",
-			"RialtoToMillau",
+			"rialto-to-millau",
 			"--source-port",
 			"1234",
 			"--source-signer",
@@ -278,10 +308,12 @@ mod tests {
 			payload,
 			MessagePayload {
 				spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version,
-				weight: 1038000,
-				origin: CallOrigin::SourceAccount(sp_keyring::AccountKeyring::Alice.to_account_id()),
-				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
-				call: hex!("0401081234").to_vec(),
+				weight: 576000,
+				origin: CallOrigin::SourceAccount(
+					sp_keyring::AccountKeyring::Alice.to_account_id()
+				),
+				dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain,
+				call: hex!("0001081234").to_vec(),
 			}
 		);
 	}
@@ -291,7 +323,7 @@ mod tests {
 		// given
 		let mut send_message = SendMessage::from_iter(vec![
 			"send-message",
-			"MillauToRialto",
+			"millau-to-rialto",
 			"--source-port",
 			"1234",
 			"--source-signer",
@@ -318,24 +350,24 @@ mod tests {
 			payload,
 			MessagePayload {
 				spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version,
-				weight: 1038000,
+				weight: 576000,
 				origin: CallOrigin::TargetAccount(
 					sp_keyring::AccountKeyring::Alice.to_account_id(),
 					sp_keyring::AccountKeyring::Bob.into(),
 					signature,
 				),
-				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
-				call: hex!("0701081234").to_vec(),
+				dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain,
+				call: hex!("0001081234").to_vec(),
 			}
 		);
 	}
 
 	#[test]
-	fn target_signer_must_exist_if_origin_is_target() {
+	fn accepts_send_message_command_without_target_sign_options() {
 		// given
 		let send_message = SendMessage::from_iter_safe(vec![
 			"send-message",
-			"MillauToRialto",
+			"rialto-to-millau",
 			"--source-port",
 			"1234",
 			"--source-signer",
@@ -347,6 +379,31 @@ mod tests {
 			"1234",
 		]);
 
-		assert!(send_message.is_err());
+		assert!(send_message.is_ok());
+	}
+
+	#[test]
+	fn accepts_non_default_dispatch_fee_payment() {
+		// given
+		let mut send_message = SendMessage::from_iter(vec![
+			"send-message",
+			"rialto-to-millau",
+			"--source-port",
+			"1234",
+			"--source-signer",
+			"//Alice",
+			"--dispatch-fee-payment",
+			"at-target-chain",
+			"remark",
+		]);
+
+		// when
+		let payload = send_message.encode_payload().unwrap();
+
+		// then
+		assert_eq!(
+			payload.dispatch_fee_payment,
+			bp_runtime::messages::DispatchFeePayment::AtTargetChain
+		);
 	}
 }
diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs b/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs
new file mode 100644
index 0000000000000000000000000000000000000000..aa3996aa41364ae9336d3763d83d9c2248668ea0
--- /dev/null
+++ b/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs
@@ -0,0 +1,798 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Tokens swap using token-swap bridge pallet.
+
+// TokenSwapBalances fields are never directly accessed, but the whole struct is printed
+// to show token swap progress
+#![allow(dead_code)]
+
+use codec::Encode;
+use num_traits::One;
+use rand::random;
+use structopt::StructOpt;
+use strum::{EnumString, EnumVariantNames, VariantNames};
+
+use frame_support::dispatch::GetDispatchInfo;
+use relay_substrate_client::{
+	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, CallOf, Chain, ChainWithBalances,
+	Client, Error as SubstrateError, HashOf, SignatureOf, Subscription, TransactionSignScheme,
+	TransactionStatusOf, UnsignedTransaction,
+};
+use sp_core::{blake2_256, storage::StorageKey, Bytes, Pair, H256, U256};
+use sp_runtime::traits::{Convert, Header as HeaderT};
+
+use crate::cli::{
+	Balance, CliChain, SourceConnectionParams, SourceSigningParams, TargetConnectionParams,
+	TargetSigningParams,
+};
+
+/// Swap tokens.
+#[derive(StructOpt, Debug, PartialEq)]
+pub struct SwapTokens {
+	/// A bridge instance to use in token swap.
+	#[structopt(possible_values = SwapTokensBridge::VARIANTS, case_insensitive = true)]
+	bridge: SwapTokensBridge,
+
+	#[structopt(flatten)]
+	source: SourceConnectionParams,
+	#[structopt(flatten)]
+	source_sign: SourceSigningParams,
+
+	#[structopt(flatten)]
+	target: TargetConnectionParams,
+	#[structopt(flatten)]
+	target_sign: TargetSigningParams,
+
+	#[structopt(subcommand)]
+	swap_type: TokenSwapType,
+	/// Source chain balance that source signer wants to swap.
+	#[structopt(long)]
+	source_balance: Balance,
+	/// Target chain balance that target signer wants to swap.
+	#[structopt(long)]
+	target_balance: Balance,
+}
+
+/// Token swap type.
+#[derive(StructOpt, Debug, PartialEq, Eq, Clone)]
+pub enum TokenSwapType {
+	/// The `target_sign` is temporary and only have funds for single swap.
+	NoLock,
+	/// This swap type prevents `source_signer` from restarting the swap after it has been
+	/// completed.
+	LockUntilBlock {
+		/// Number of blocks before the swap expires.
+		#[structopt(long)]
+		blocks_before_expire: u32,
+		/// Unique swap nonce.
+		#[structopt(long)]
+		swap_nonce: Option<U256>,
+	},
+}
+
+/// Swap tokens bridge.
+#[derive(Debug, EnumString, EnumVariantNames, PartialEq)]
+#[strum(serialize_all = "kebab_case")]
+pub enum SwapTokensBridge {
+	/// Use token-swap pallet deployed at Millau to swap tokens with Rialto.
+	MillauToRialto,
+}
+
+macro_rules! select_bridge {
+	($bridge: expr, $generic: tt) => {
+		match $bridge {
+			SwapTokensBridge::MillauToRialto => {
+				type Source = relay_millau_client::Millau;
+				type Target = relay_rialto_client::Rialto;
+
+				type FromSwapToThisAccountIdConverter = bp_rialto::AccountIdConverter;
+
+				use bp_millau::{
+					derive_account_from_rialto_id as derive_source_account_from_target_account,
+					TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_TARGET_TO_SOURCE_MESSAGE_FEE_METHOD,
+					WITH_RIALTO_TOKEN_SWAP_PALLET_NAME as TOKEN_SWAP_PALLET_NAME,
+				};
+				use bp_rialto::{
+					derive_account_from_millau_id as derive_target_account_from_source_account,
+					TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_SOURCE_TO_TARGET_MESSAGE_FEE_METHOD,
+				};
+
+				const SOURCE_CHAIN_ID: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID;
+				const TARGET_CHAIN_ID: bp_runtime::ChainId = bp_runtime::RIALTO_CHAIN_ID;
+
+				const SOURCE_SPEC_VERSION: u32 = millau_runtime::VERSION.spec_version;
+				const TARGET_SPEC_VERSION: u32 = rialto_runtime::VERSION.spec_version;
+
+				const SOURCE_TO_TARGET_LANE_ID: bp_messages::LaneId = *b"swap";
+				const TARGET_TO_SOURCE_LANE_ID: bp_messages::LaneId = [0, 0, 0, 0];
+
+				$generic
+			},
+		}
+	};
+}
+
+impl SwapTokens {
+	/// Run the command.
+	pub async fn run(self) -> anyhow::Result<()> {
+		select_bridge!(self.bridge, {
+			let source_client = self.source.to_client::<Source>().await?;
+			let source_sign = self.source_sign.to_keypair::<Target>()?;
+			let target_client = self.target.to_client::<Target>().await?;
+			let target_sign = self.target_sign.to_keypair::<Target>()?;
+
+			// names of variables in this function are matching names used by the
+			// `pallet-bridge-token-swap`
+
+			// prepare token swap intention
+			let token_swap = self
+				.prepare_token_swap::<Source, Target>(&source_client, &source_sign, &target_sign)
+				.await?;
+
+			// group all accounts that will be used later
+			let accounts = TokenSwapAccounts {
+				source_account_at_bridged_chain: derive_target_account_from_source_account(
+					bp_runtime::SourceAccount::Account(
+						token_swap.source_account_at_this_chain.clone(),
+					),
+				),
+				target_account_at_this_chain: derive_source_account_from_target_account(
+					bp_runtime::SourceAccount::Account(
+						token_swap.target_account_at_bridged_chain.clone(),
+					),
+				),
+				source_account_at_this_chain: token_swap.source_account_at_this_chain.clone(),
+				target_account_at_bridged_chain: token_swap.target_account_at_bridged_chain.clone(),
+				swap_account: FromSwapToThisAccountIdConverter::convert(
+					token_swap.using_encoded(blake2_256).into(),
+				),
+			};
+
+			// account balances are used to demonstrate what's happening :)
+			let initial_balances =
+				read_account_balances(&accounts, &source_client, &target_client).await?;
+
+			// before calling something that may fail, log what we're trying to do
+			log::info!(target: "bridge", "Starting swap: {:?}", token_swap);
+			log::info!(target: "bridge", "Swap accounts: {:?}", accounts);
+			log::info!(target: "bridge", "Initial account balances: {:?}", initial_balances);
+
+			//
+			// Step 1: swap is created
+			//
+
+			// prepare `Currency::transfer` call that will happen at the target chain
+			let bridged_currency_transfer: CallOf<Target> = pallet_balances::Call::transfer {
+				dest: accounts.source_account_at_bridged_chain.clone().into(),
+				value: token_swap.target_balance_at_bridged_chain,
+			}
+			.into();
+			let bridged_currency_transfer_weight =
+				bridged_currency_transfer.get_dispatch_info().weight;
+
+			// sign message
+			let bridged_chain_spec_version = TARGET_SPEC_VERSION;
+			let signature_payload = pallet_bridge_dispatch::account_ownership_digest(
+				&bridged_currency_transfer,
+				&accounts.swap_account,
+				&bridged_chain_spec_version,
+				SOURCE_CHAIN_ID,
+				TARGET_CHAIN_ID,
+			);
+			let bridged_currency_transfer_signature: SignatureOf<Target> =
+				target_sign.sign(&signature_payload).into();
+
+			// prepare `create_swap` call
+			let target_public_at_bridged_chain: AccountPublicOf<Target> =
+				target_sign.public().into();
+			let swap_delivery_and_dispatch_fee: BalanceOf<Source> =
+				crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee(
+					&source_client,
+					ESTIMATE_SOURCE_TO_TARGET_MESSAGE_FEE_METHOD,
+					SOURCE_TO_TARGET_LANE_ID,
+					bp_message_dispatch::MessagePayload {
+						spec_version: TARGET_SPEC_VERSION,
+						weight: bridged_currency_transfer_weight,
+						origin: bp_message_dispatch::CallOrigin::TargetAccount(
+							accounts.swap_account.clone(),
+							target_public_at_bridged_chain.clone(),
+							bridged_currency_transfer_signature.clone(),
+						),
+						dispatch_fee_payment:
+							bp_runtime::messages::DispatchFeePayment::AtTargetChain,
+						call: bridged_currency_transfer.encode(),
+					},
+				)
+				.await?;
+			let create_swap_call: CallOf<Source> = pallet_bridge_token_swap::Call::create_swap {
+				swap: token_swap.clone(),
+				swap_creation_params: Box::new(bp_token_swap::TokenSwapCreation {
+					target_public_at_bridged_chain,
+					swap_delivery_and_dispatch_fee,
+					bridged_chain_spec_version,
+					bridged_currency_transfer: bridged_currency_transfer.encode(),
+					bridged_currency_transfer_weight,
+					bridged_currency_transfer_signature,
+				}),
+			}
+			.into();
+
+			// start tokens swap
+			let source_genesis_hash = *source_client.genesis_hash();
+			let create_swap_signer = source_sign.clone();
+			let swap_created_at = wait_until_transaction_is_finalized::<Source>(
+				source_client
+					.submit_and_watch_signed_extrinsic(
+						accounts.source_account_at_this_chain.clone(),
+						move |_, transaction_nonce| {
+							Bytes(
+								Source::sign_transaction(
+									source_genesis_hash,
+									&create_swap_signer,
+									relay_substrate_client::TransactionEra::immortal(),
+									UnsignedTransaction::new(create_swap_call, transaction_nonce),
+								)
+								.encode(),
+							)
+						},
+					)
+					.await?,
+			)
+			.await?;
+
+			// read state of swap after it has been created
+			let token_swap_hash: H256 = token_swap.using_encoded(blake2_256).into();
+			let token_swap_storage_key = bp_runtime::storage_map_final_key_identity(
+				TOKEN_SWAP_PALLET_NAME,
+				pallet_bridge_token_swap::PENDING_SWAPS_MAP_NAME,
+				token_swap_hash.as_ref(),
+			);
+			match read_token_swap_state(&source_client, swap_created_at, &token_swap_storage_key)
+				.await?
+			{
+				Some(bp_token_swap::TokenSwapState::Started) => {
+					log::info!(target: "bridge", "Swap has been successfully started");
+					let intermediate_balances =
+						read_account_balances(&accounts, &source_client, &target_client).await?;
+					log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances);
+				},
+				Some(token_swap_state) =>
+					return Err(anyhow::format_err!(
+						"Fresh token swap has unexpected state: {:?}",
+						token_swap_state,
+					)),
+				None => return Err(anyhow::format_err!("Failed to start token swap")),
+			};
+
+			//
+			// Step 2: message is being relayed to the target chain and dispathed there
+			//
+
+			// wait until message is dispatched at the target chain and dispatch result delivered
+			// back to source chain
+			let token_swap_state = wait_until_token_swap_state_is_changed(
+				&source_client,
+				&token_swap_storage_key,
+				bp_token_swap::TokenSwapState::Started,
+			)
+			.await?;
+			let is_transfer_succeeded = match token_swap_state {
+				Some(bp_token_swap::TokenSwapState::Started) => {
+					unreachable!("wait_until_token_swap_state_is_changed only returns if state is not Started; qed",)
+				},
+				None =>
+					return Err(anyhow::format_err!("Fresh token swap has disappeared unexpectedly")),
+				Some(bp_token_swap::TokenSwapState::Confirmed) => {
+					log::info!(
+						target: "bridge",
+						"Transfer has been successfully dispatched at the target chain. Swap can be claimed",
+					);
+					true
+				},
+				Some(bp_token_swap::TokenSwapState::Failed) => {
+					log::info!(
+						target: "bridge",
+						"Transfer has been dispatched with an error at the target chain. Swap can be canceled",
+					);
+					false
+				},
+			};
+
+			// by this time: (1) token swap account has been created and (2) if transfer has been
+			// successfully dispatched, both target chain balances have changed
+			let intermediate_balances =
+				read_account_balances(&accounts, &source_client, &target_client).await?;
+			log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances);
+
+			// transfer has been dispatched, but we may need to wait until block where swap can be
+			// claimed/canceled
+			if let bp_token_swap::TokenSwapType::LockClaimUntilBlock(
+				ref last_available_block_number,
+				_,
+			) = token_swap.swap_type
+			{
+				wait_until_swap_unlocked(
+					&source_client,
+					last_available_block_number + BlockNumberOf::<Source>::one(),
+				)
+				.await?;
+			}
+
+			//
+			// Step 3: we may now claim or cancel the swap
+			//
+
+			if is_transfer_succeeded {
+				log::info!(target: "bridge", "Claiming the swap swap");
+
+				// prepare `claim_swap` message that will be sent over the bridge
+				let claim_swap_call: CallOf<Source> =
+					pallet_bridge_token_swap::Call::claim_swap { swap: token_swap }.into();
+				let claim_swap_message = bp_message_dispatch::MessagePayload {
+					spec_version: SOURCE_SPEC_VERSION,
+					weight: claim_swap_call.get_dispatch_info().weight,
+					origin: bp_message_dispatch::CallOrigin::SourceAccount(
+						accounts.target_account_at_bridged_chain.clone(),
+					),
+					dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain,
+					call: claim_swap_call.encode(),
+				};
+				let claim_swap_delivery_and_dispatch_fee: BalanceOf<Target> =
+					crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee(
+						&target_client,
+						ESTIMATE_TARGET_TO_SOURCE_MESSAGE_FEE_METHOD,
+						TARGET_TO_SOURCE_LANE_ID,
+						claim_swap_message.clone(),
+					)
+					.await?;
+				let send_message_call: CallOf<Target> =
+					pallet_bridge_messages::Call::send_message {
+						lane_id: TARGET_TO_SOURCE_LANE_ID,
+						payload: claim_swap_message,
+						delivery_and_dispatch_fee: claim_swap_delivery_and_dispatch_fee,
+					}
+					.into();
+
+				// send `claim_swap` message
+				let target_genesis_hash = *target_client.genesis_hash();
+				let _ = wait_until_transaction_is_finalized::<Target>(
+					target_client
+						.submit_and_watch_signed_extrinsic(
+							accounts.target_account_at_bridged_chain.clone(),
+							move |_, transaction_nonce| {
+								Bytes(
+									Target::sign_transaction(
+										target_genesis_hash,
+										&target_sign,
+										relay_substrate_client::TransactionEra::immortal(),
+										UnsignedTransaction::new(
+											send_message_call,
+											transaction_nonce,
+										),
+									)
+									.encode(),
+								)
+							},
+						)
+						.await?,
+				)
+				.await?;
+
+				// wait until swap state is updated
+				let token_swap_state = wait_until_token_swap_state_is_changed(
+					&source_client,
+					&token_swap_storage_key,
+					bp_token_swap::TokenSwapState::Confirmed,
+				)
+				.await?;
+				if token_swap_state != None {
+					return Err(anyhow::format_err!(
+						"Confirmed token swap state has been changed to {:?} unexpectedly"
+					))
+				}
+			} else {
+				log::info!(target: "bridge", "Cancelling the swap");
+				let cancel_swap_call: CallOf<Source> =
+					pallet_bridge_token_swap::Call::cancel_swap { swap: token_swap.clone() }.into();
+				let _ = wait_until_transaction_is_finalized::<Source>(
+					source_client
+						.submit_and_watch_signed_extrinsic(
+							accounts.source_account_at_this_chain.clone(),
+							move |_, transaction_nonce| {
+								Bytes(
+									Source::sign_transaction(
+										source_genesis_hash,
+										&source_sign,
+										relay_substrate_client::TransactionEra::immortal(),
+										UnsignedTransaction::new(
+											cancel_swap_call,
+											transaction_nonce,
+										),
+									)
+									.encode(),
+								)
+							},
+						)
+						.await?,
+				)
+				.await?;
+			}
+
+			// print final balances
+			let final_balances =
+				read_account_balances(&accounts, &source_client, &target_client).await?;
+			log::info!(target: "bridge", "Final account balances: {:?}", final_balances);
+
+			Ok(())
+		})
+	}
+
+	/// Prepare token swap intention.
+	///
+	/// Builds the `bp_token_swap::TokenSwap` structure from command-line arguments:
+	/// - participant accounts are derived from the provided signing key pairs;
+	/// - swapped balances are taken from `self.source_balance` / `self.target_balance`;
+	/// - the swap type (lock behavior) is computed by `prepare_token_swap_type`, which may
+	///   query the source chain for its best block number.
+	async fn prepare_token_swap<Source: CliChain, Target: CliChain>(
+		&self,
+		source_client: &Client<Source>,
+		source_sign: &Source::KeyPair,
+		target_sign: &Target::KeyPair,
+	) -> anyhow::Result<
+		bp_token_swap::TokenSwap<
+			BlockNumberOf<Source>,
+			BalanceOf<Source>,
+			AccountIdOf<Source>,
+			BalanceOf<Target>,
+			AccountIdOf<Target>,
+		>,
+	>
+	where
+		AccountIdOf<Source>: From<<Source::KeyPair as Pair>::Public>,
+		AccountIdOf<Target>: From<<Target::KeyPair as Pair>::Public>,
+		BalanceOf<Source>: From<u64>,
+		BalanceOf<Target>: From<u64>,
+	{
+		// accounts that are directly controlled by participants (public keys of the
+		// CLI-provided signers, converted into chain account ids)
+		let source_account_at_this_chain: AccountIdOf<Source> = source_sign.public().into();
+		let target_account_at_bridged_chain: AccountIdOf<Target> = target_sign.public().into();
+
+		// balances that we're going to swap
+		let source_balance_at_this_chain: BalanceOf<Source> = self.source_balance.cast().into();
+		let target_balance_at_bridged_chain: BalanceOf<Target> = self.target_balance.cast().into();
+
+		// prepare token swap intention
+		Ok(bp_token_swap::TokenSwap {
+			swap_type: self.prepare_token_swap_type(source_client).await?,
+			source_balance_at_this_chain,
+			source_account_at_this_chain: source_account_at_this_chain.clone(),
+			target_balance_at_bridged_chain,
+			target_account_at_bridged_chain: target_account_at_bridged_chain.clone(),
+		})
+	}
+
+	/// Prepare token swap type.
+	///
+	/// Maps the CLI `swap_type` option onto the runtime representation:
+	/// - `NoLock` becomes `TemporaryTargetAccountAtBridgedChain`;
+	/// - `LockUntilBlock` becomes `LockClaimUntilBlock(best_source_block + blocks_before_expire,
+	///   nonce)`, where the nonce is either supplied by the user or generated randomly.
+	async fn prepare_token_swap_type<Source: Chain>(
+		&self,
+		source_client: &Client<Source>,
+	) -> anyhow::Result<bp_token_swap::TokenSwapType<BlockNumberOf<Source>>> {
+		match self.swap_type {
+			TokenSwapType::NoLock =>
+				Ok(bp_token_swap::TokenSwapType::TemporaryTargetAccountAtBridgedChain),
+			TokenSwapType::LockUntilBlock { blocks_before_expire, ref swap_nonce } => {
+				let blocks_before_expire: BlockNumberOf<Source> = blocks_before_expire.into();
+				// the lock deadline is relative to the current best source-chain block
+				let current_source_block_number = *source_client.best_header().await?.number();
+				Ok(bp_token_swap::TokenSwapType::LockClaimUntilBlock(
+					current_source_block_number + blocks_before_expire,
+					// random U256 nonce built from two random u128 values; the product of two
+					// 128-bit values always fits into 256 bits, so the overflow flag (.0 drops
+					// the bool) can never actually be set
+					swap_nonce.unwrap_or_else(|| {
+						U256::from(random::<u128>()).overflowing_mul(U256::from(random::<u128>())).0
+					}),
+				))
+			},
+		}
+	}
+}
+
+/// Accounts that are participating in the swap.
+///
+/// Field names follow the pattern `<participant>_account_at_<chain>`; balances of these
+/// accounts are queried by `read_account_balances` (source-chain accounts through the source
+/// client, bridged-chain accounts through the target client).
+#[derive(Debug)]
+struct TokenSwapAccounts<ThisAccountId, BridgedAccountId> {
+	// source participant's account at the source (this) chain
+	source_account_at_this_chain: ThisAccountId,
+	// source participant's account at the target (bridged) chain
+	source_account_at_bridged_chain: BridgedAccountId,
+	// target participant's account at the target (bridged) chain
+	target_account_at_bridged_chain: BridgedAccountId,
+	// target participant's account at the source (this) chain
+	target_account_at_this_chain: ThisAccountId,
+	// swap account at the source chain (presumably derived from the swap intention — confirm
+	// against the code that fills this struct)
+	swap_account: ThisAccountId,
+}
+
+/// Swap accounts balances.
+///
+/// Every field mirrors the same-named account of `TokenSwapAccounts`. A value of `None`
+/// means the account does not exist on its chain (see `read_account_balance`, which maps
+/// `SubstrateError::AccountDoesNotExist` to `None`).
+#[derive(Debug)]
+struct TokenSwapBalances<ThisBalance, BridgedBalance> {
+	source_account_at_this_chain_balance: Option<ThisBalance>,
+	source_account_at_bridged_chain_balance: Option<BridgedBalance>,
+	target_account_at_bridged_chain_balance: Option<BridgedBalance>,
+	target_account_at_this_chain_balance: Option<ThisBalance>,
+	swap_account_balance: Option<ThisBalance>,
+}
+
+/// Read swap accounts balances.
+///
+/// Queries the free native balance of all five swap-related accounts: source-chain accounts
+/// (`*_at_this_chain` and the swap account) through `source_client`, bridged-chain accounts
+/// (`*_at_bridged_chain`) through `target_client`. Missing accounts yield `None` rather than
+/// an error. Fails if any single balance query fails for another reason.
+async fn read_account_balances<Source: ChainWithBalances, Target: ChainWithBalances>(
+	accounts: &TokenSwapAccounts<AccountIdOf<Source>, AccountIdOf<Target>>,
+	source_client: &Client<Source>,
+	target_client: &Client<Target>,
+) -> anyhow::Result<TokenSwapBalances<BalanceOf<Source>, BalanceOf<Target>>> {
+	Ok(TokenSwapBalances {
+		source_account_at_this_chain_balance: read_account_balance(
+			source_client,
+			&accounts.source_account_at_this_chain,
+		)
+		.await?,
+		source_account_at_bridged_chain_balance: read_account_balance(
+			target_client,
+			&accounts.source_account_at_bridged_chain,
+		)
+		.await?,
+		target_account_at_bridged_chain_balance: read_account_balance(
+			target_client,
+			&accounts.target_account_at_bridged_chain,
+		)
+		.await?,
+		target_account_at_this_chain_balance: read_account_balance(
+			source_client,
+			&accounts.target_account_at_this_chain,
+		)
+		.await?,
+		swap_account_balance: read_account_balance(source_client, &accounts.swap_account).await?,
+	})
+}
+
+/// Read account balance.
+///
+/// Returns:
+/// - `Ok(Some(balance))` with the free native balance of the account;
+/// - `Ok(None)` if the account does not exist on the chain (this is an expected state for
+///   not-yet-created swap accounts, so it is not treated as an error);
+/// - `Err(_)` for any other RPC/client failure, annotated with chain name and account id.
+async fn read_account_balance<C: ChainWithBalances>(
+	client: &Client<C>,
+	account: &AccountIdOf<C>,
+) -> anyhow::Result<Option<BalanceOf<C>>> {
+	match client.free_native_balance(account.clone()).await {
+		Ok(balance) => Ok(Some(balance)),
+		Err(SubstrateError::AccountDoesNotExist) => Ok(None),
+		Err(error) => Err(anyhow::format_err!(
+			"Failed to read balance of {} account {:?}: {:?}",
+			C::NAME,
+			account,
+			error,
+		)),
+	}
+}
+
+/// Wait until transaction is included into finalized block.
+///
+/// Consumes the transaction-watch subscription until a terminal status arrives:
+/// - `Finalized(block_hash)` — success, returns the hash of the finalized block;
+/// - `FinalityTimeout`, `Usurped`, `Dropped`, `Invalid`, or a closed subscription (`None`)
+///   — failure, returns an error describing the status;
+/// - any other (intermediate) status is logged and the loop keeps waiting.
+///
+/// Returns the hash of the finalized block with transaction.
+pub(crate) async fn wait_until_transaction_is_finalized<C: Chain>(
+	subscription: Subscription<TransactionStatusOf<C>>,
+) -> anyhow::Result<HashOf<C>> {
+	loop {
+		let transaction_status = subscription.next().await?;
+		match transaction_status {
+			// terminal failure statuses: the transaction will never be finalized from here
+			Some(TransactionStatusOf::<C>::FinalityTimeout(_)) |
+			Some(TransactionStatusOf::<C>::Usurped(_)) |
+			Some(TransactionStatusOf::<C>::Dropped) |
+			Some(TransactionStatusOf::<C>::Invalid) |
+			None =>
+				return Err(anyhow::format_err!(
+					"We've been waiting for finalization of {} transaction, but it now has the {:?} status",
+					C::NAME,
+					transaction_status,
+				)),
+			Some(TransactionStatusOf::<C>::Finalized(block_hash)) => {
+				log::trace!(
+					target: "bridge",
+					"{} transaction has been finalized at block {}",
+					C::NAME,
+					block_hash,
+				);
+				return Ok(block_hash)
+			},
+			// intermediate statuses (ready, broadcast, in-block, ...) — keep waiting
+			_ => {
+				log::trace!(
+					target: "bridge",
+					"Received intermediate status of {} transaction: {:?}",
+					C::NAME,
+					transaction_status,
+				);
+			},
+		}
+	}
+}
+
+/// Waits until the on-chain token swap state is changed from `previous_token_swap_state`
+/// to anything else.
+///
+/// Polls the swap-state storage entry at the best *finalized* block once per average block
+/// interval. Returns the new state as soon as it differs from the previous one — including
+/// `None`, which means the storage entry is absent (e.g. the swap has been removed).
+async fn wait_until_token_swap_state_is_changed<C: Chain>(
+	client: &Client<C>,
+	swap_state_storage_key: &StorageKey,
+	previous_token_swap_state: bp_token_swap::TokenSwapState,
+) -> anyhow::Result<Option<bp_token_swap::TokenSwapState>> {
+	log::trace!(target: "bridge", "Waiting for token swap state change");
+	loop {
+		// poll roughly once per block
+		async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await;
+
+		let best_block = client.best_finalized_header_number().await?;
+		let best_block_hash = client.block_hash_by_number(best_block).await?;
+		log::trace!(target: "bridge", "Inspecting {} block {}/{}", C::NAME, best_block, best_block_hash);
+
+		let token_swap_state =
+			read_token_swap_state(client, best_block_hash, swap_state_storage_key).await?;
+		match token_swap_state {
+			// state is unchanged — keep polling
+			Some(new_token_swap_state) if new_token_swap_state == previous_token_swap_state => {},
+			// state changed (or the entry disappeared) — report it to the caller
+			_ => {
+				log::trace!(
+					target: "bridge",
+					"Token swap state has been changed from {:?} to {:?}",
+					previous_token_swap_state,
+					token_swap_state,
+				);
+				return Ok(token_swap_state)
+			},
+		}
+	}
+}
+
+/// Waits until swap can be claimed or canceled.
+///
+/// Polls the chain once per average block interval and returns once the best finalized
+/// block number reaches `required_block_number`. The block hash is fetched only for the
+/// trace log of skipped blocks.
+async fn wait_until_swap_unlocked<C: Chain>(
+	client: &Client<C>,
+	required_block_number: BlockNumberOf<C>,
+) -> anyhow::Result<()> {
+	log::trace!(target: "bridge", "Waiting for token swap unlock");
+	loop {
+		// poll roughly once per block
+		async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await;
+
+		let best_block = client.best_finalized_header_number().await?;
+		let best_block_hash = client.block_hash_by_number(best_block).await?;
+		if best_block >= required_block_number {
+			return Ok(())
+		}
+
+		log::trace!(target: "bridge", "Skipping {} block {}/{}", C::NAME, best_block, best_block_hash);
+	}
+}
+
+/// Read state of the active token swap.
+///
+/// Reads the decoded `TokenSwapState` storage value at the given block; `Ok(None)` means
+/// the storage entry does not exist at that block.
+async fn read_token_swap_state<C: Chain>(
+	client: &Client<C>,
+	at_block: C::Hash,
+	swap_state_storage_key: &StorageKey,
+) -> anyhow::Result<Option<bp_token_swap::TokenSwapState>> {
+	Ok(client.storage_value(swap_state_storage_key.clone(), Some(at_block)).await?)
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	// Checks that the `swap-tokens` CLI arguments for a `no-lock` swap are parsed into the
+	// expected `SwapTokens` structure (structopt round-trip test; no chain access involved).
+	#[test]
+	fn swap_tokens_millau_to_rialto_no_lock() {
+		let swap_tokens = SwapTokens::from_iter(vec![
+			"swap-tokens",
+			"millau-to-rialto",
+			"--source-host",
+			"127.0.0.1",
+			"--source-port",
+			"9000",
+			"--source-signer",
+			"//Alice",
+			"--source-balance",
+			"8000000000",
+			"--target-host",
+			"127.0.0.1",
+			"--target-port",
+			"9001",
+			"--target-signer",
+			"//Bob",
+			"--target-balance",
+			"9000000000",
+			"no-lock",
+		]);
+
+		assert_eq!(
+			swap_tokens,
+			SwapTokens {
+				bridge: SwapTokensBridge::MillauToRialto,
+				source: SourceConnectionParams {
+					source_host: "127.0.0.1".into(),
+					source_port: 9000,
+					source_secure: false,
+				},
+				source_sign: SourceSigningParams {
+					source_signer: Some("//Alice".into()),
+					source_signer_password: None,
+					source_signer_file: None,
+					source_signer_password_file: None,
+					source_transactions_mortality: None,
+				},
+				target: TargetConnectionParams {
+					target_host: "127.0.0.1".into(),
+					target_port: 9001,
+					target_secure: false,
+				},
+				target_sign: TargetSigningParams {
+					target_signer: Some("//Bob".into()),
+					target_signer_password: None,
+					target_signer_file: None,
+					target_signer_password_file: None,
+					target_transactions_mortality: None,
+				},
+				swap_type: TokenSwapType::NoLock,
+				source_balance: Balance(8000000000),
+				target_balance: Balance(9000000000),
+			}
+		);
+	}
+
+	// Same round-trip check for the `lock-until-block` swap subcommand: `--blocks-before-expire`
+	// is parsed and `swap_nonce` defaults to `None` when not provided.
+	#[test]
+	fn swap_tokens_millau_to_rialto_lock_until() {
+		let swap_tokens = SwapTokens::from_iter(vec![
+			"swap-tokens",
+			"millau-to-rialto",
+			"--source-host",
+			"127.0.0.1",
+			"--source-port",
+			"9000",
+			"--source-signer",
+			"//Alice",
+			"--source-balance",
+			"8000000000",
+			"--target-host",
+			"127.0.0.1",
+			"--target-port",
+			"9001",
+			"--target-signer",
+			"//Bob",
+			"--target-balance",
+			"9000000000",
+			"lock-until-block",
+			"--blocks-before-expire",
+			"1",
+		]);
+
+		assert_eq!(
+			swap_tokens,
+			SwapTokens {
+				bridge: SwapTokensBridge::MillauToRialto,
+				source: SourceConnectionParams {
+					source_host: "127.0.0.1".into(),
+					source_port: 9000,
+					source_secure: false,
+				},
+				source_sign: SourceSigningParams {
+					source_signer: Some("//Alice".into()),
+					source_signer_password: None,
+					source_signer_file: None,
+					source_signer_password_file: None,
+					source_transactions_mortality: None,
+				},
+				target: TargetConnectionParams {
+					target_host: "127.0.0.1".into(),
+					target_port: 9001,
+					target_secure: false,
+				},
+				target_sign: TargetSigningParams {
+					target_signer: Some("//Bob".into()),
+					target_signer_password: None,
+					target_signer_file: None,
+					target_signer_password_file: None,
+					target_transactions_mortality: None,
+				},
+				swap_type: TokenSwapType::LockUntilBlock {
+					blocks_before_expire: 1,
+					swap_nonce: None,
+				},
+				source_balance: Balance(8000000000),
+				target_balance: Balance(9000000000),
+			}
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/main.rs b/polkadot/bridges/relays/bin-substrate/src/main.rs
index d119042b0d8dc01639734696f19e6381a75dc7c8..13db6beefa6a0b4e89e00e803eb313f08930c510 100644
--- a/polkadot/bridges/relays/bin-substrate/src/main.rs
+++ b/polkadot/bridges/relays/bin-substrate/src/main.rs
@@ -20,13 +20,6 @@
 
 mod chains;
 mod cli;
-mod finality_pipeline;
-mod finality_target;
-mod headers_initialize;
-mod messages_lane;
-mod messages_source;
-mod messages_target;
-mod on_demand_headers;
 
 fn main() {
 	let command = cli::parse_args();
diff --git a/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs b/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs
deleted file mode 100644
index 458b08f6014ffd6bd358792a42c9fbd526dd0a22..0000000000000000000000000000000000000000
--- a/polkadot/bridges/relays/bin-substrate/src/messages_lane.rs
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
-
-use crate::messages_source::SubstrateMessagesProof;
-use crate::messages_target::SubstrateMessagesReceivingProof;
-use crate::on_demand_headers::OnDemandHeadersRelay;
-
-use bp_messages::{LaneId, MessageNonce};
-use frame_support::weights::Weight;
-use messages_relay::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use relay_substrate_client::{BlockNumberOf, Chain, Client, HashOf};
-use relay_utils::{metrics::MetricsParams, BlockNumberBase};
-use sp_core::Bytes;
-use std::ops::RangeInclusive;
-
-/// Substrate <-> Substrate messages relay parameters.
-pub struct MessagesRelayParams<SC: Chain, SS, TC: Chain, TS> {
-	/// Messages source client.
-	pub source_client: Client<SC>,
-	/// Sign parameters for messages source chain.
-	pub source_sign: SS,
-	/// Messages target client.
-	pub target_client: Client<TC>,
-	/// Sign parameters for messages target chain.
-	pub target_sign: TS,
-	/// Optional on-demand source to target headers relay.
-	pub source_to_target_headers_relay: Option<OnDemandHeadersRelay<SC>>,
-	/// Optional on-demand target to source headers relay.
-	pub target_to_source_headers_relay: Option<OnDemandHeadersRelay<TC>>,
-	/// Identifier of lane that needs to be served.
-	pub lane_id: LaneId,
-	/// Metrics parameters.
-	pub metrics_params: MetricsParams,
-}
-
-/// Message sync pipeline for Substrate <-> Substrate relays.
-pub trait SubstrateMessageLane: MessageLane {
-	/// Name of the runtime method that returns dispatch weight of outbound messages at the source chain.
-	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str;
-	/// Name of the runtime method that returns latest generated nonce at the source chain.
-	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str;
-	/// Name of the runtime method that returns latest received (confirmed) nonce at the the source chain.
-	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str;
-
-	/// Name of the runtime method that returns latest received nonce at the target chain.
-	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str;
-	/// Name of the runtime method that returns latest confirmed (reward-paid) nonce at the target chain.
-	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str;
-	/// Number of the runtime method that returns state of "unrewarded relayers" set at the target chain.
-	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str;
-
-	/// Name of the runtime method that returns id of best finalized source header at target chain.
-	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str;
-	/// Name of the runtime method that returns id of best finalized target header at source chain.
-	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str;
-
-	/// Source chain.
-	type SourceChain: Chain;
-	/// Target chain.
-	type TargetChain: Chain;
-
-	/// Returns id of account that we're using to sign transactions at target chain (messages proof).
-	fn target_transactions_author(&self) -> <Self::TargetChain as Chain>::AccountId;
-
-	/// Make messages delivery transaction.
-	fn make_messages_delivery_transaction(
-		&self,
-		transaction_nonce: <Self::TargetChain as Chain>::Index,
-		generated_at_header: SourceHeaderIdOf<Self>,
-		nonces: RangeInclusive<MessageNonce>,
-		proof: Self::MessagesProof,
-	) -> Bytes;
-
-	/// Returns id of account that we're using to sign transactions at source chain (delivery proof).
-	fn source_transactions_author(&self) -> <Self::SourceChain as Chain>::AccountId;
-
-	/// Make messages receiving proof transaction.
-	fn make_messages_receiving_proof_transaction(
-		&self,
-		transaction_nonce: <Self::SourceChain as Chain>::Index,
-		generated_at_header: TargetHeaderIdOf<Self>,
-		proof: Self::MessagesReceivingProof,
-	) -> Bytes;
-}
-
-/// Substrate-to-Substrate message lane.
-#[derive(Debug)]
-pub struct SubstrateMessageLaneToSubstrate<Source: Chain, SourceSignParams, Target: Chain, TargetSignParams> {
-	/// Client for the source Substrate chain.
-	pub(crate) source_client: Client<Source>,
-	/// Parameters required to sign transactions for source chain.
-	pub(crate) source_sign: SourceSignParams,
-	/// Client for the target Substrate chain.
-	pub(crate) target_client: Client<Target>,
-	/// Parameters required to sign transactions for target chain.
-	pub(crate) target_sign: TargetSignParams,
-	/// Account id of relayer at the source chain.
-	pub(crate) relayer_id_at_source: Source::AccountId,
-}
-
-impl<Source: Chain, SourceSignParams: Clone, Target: Chain, TargetSignParams: Clone> Clone
-	for SubstrateMessageLaneToSubstrate<Source, SourceSignParams, Target, TargetSignParams>
-{
-	fn clone(&self) -> Self {
-		Self {
-			source_client: self.source_client.clone(),
-			source_sign: self.source_sign.clone(),
-			target_client: self.target_client.clone(),
-			target_sign: self.target_sign.clone(),
-			relayer_id_at_source: self.relayer_id_at_source.clone(),
-		}
-	}
-}
-
-impl<Source: Chain, SourceSignParams, Target: Chain, TargetSignParams> MessageLane
-	for SubstrateMessageLaneToSubstrate<Source, SourceSignParams, Target, TargetSignParams>
-where
-	SourceSignParams: Clone + Send + Sync + 'static,
-	TargetSignParams: Clone + Send + Sync + 'static,
-	BlockNumberOf<Source>: BlockNumberBase,
-	BlockNumberOf<Target>: BlockNumberBase,
-{
-	const SOURCE_NAME: &'static str = Source::NAME;
-	const TARGET_NAME: &'static str = Target::NAME;
-
-	type MessagesProof = SubstrateMessagesProof<Source>;
-	type MessagesReceivingProof = SubstrateMessagesReceivingProof<Target>;
-
-	type SourceChainBalance = Source::Balance;
-	type SourceHeaderNumber = BlockNumberOf<Source>;
-	type SourceHeaderHash = HashOf<Source>;
-
-	type TargetHeaderNumber = BlockNumberOf<Target>;
-	type TargetHeaderHash = HashOf<Target>;
-}
-
-/// Returns maximal number of messages and their maximal cumulative dispatch weight, based
-/// on given chain parameters.
-pub fn select_delivery_transaction_limits<W: pallet_bridge_messages::WeightInfoExt>(
-	max_extrinsic_weight: Weight,
-	max_unconfirmed_messages_at_inbound_lane: MessageNonce,
-) -> (MessageNonce, Weight) {
-	// We may try to guess accurate value, based on maximal number of messages and per-message
-	// weight overhead, but the relay loop isn't using this info in a super-accurate way anyway.
-	// So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is
-	// for messages dispatch.
-
-	// Another thing to keep in mind is that our runtimes (when this code was written) accept
-	// messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than
-	// that for dispatch.
-
-	let weight_for_delivery_tx = max_extrinsic_weight / 3;
-	let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx;
-
-	let delivery_tx_base_weight =
-		W::receive_messages_proof_overhead() + W::receive_messages_proof_outbound_lane_state_overhead();
-	let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight;
-	let max_number_of_messages = std::cmp::min(
-		delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1),
-		max_unconfirmed_messages_at_inbound_lane,
-	);
-
-	assert!(
-		max_number_of_messages > 0,
-		"Relay should fit at least one message in every delivery transaction",
-	);
-	assert!(
-		weight_for_messages_dispatch >= max_extrinsic_weight / 2,
-		"Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2",
-	);
-
-	(max_number_of_messages, weight_for_messages_dispatch)
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	type RialtoToMillauMessagesWeights = pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>;
-
-	#[test]
-	fn select_delivery_transaction_limits_works() {
-		let (max_count, max_weight) = select_delivery_transaction_limits::<RialtoToMillauMessagesWeights>(
-			bp_millau::max_extrinsic_weight(),
-			bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
-		);
-		assert_eq!(
-			(max_count, max_weight),
-			// We don't actually care about these values, so feel free to update them whenever test
-			// fails. The only thing to do before that is to ensure that new values looks sane: i.e. weight
-			// reserved for messages dispatch allows dispatch of non-trivial messages.
-			//
-			// Any significant change in this values should attract additional attention.
-			(782, 216_583_333_334),
-		);
-	}
-}
diff --git a/polkadot/bridges/relays/bin-substrate/src/messages_target.rs b/polkadot/bridges/relays/bin-substrate/src/messages_target.rs
deleted file mode 100644
index f74efbe61b5af2d9c58d803bda4fcba0d4ba9456..0000000000000000000000000000000000000000
--- a/polkadot/bridges/relays/bin-substrate/src/messages_target.rs
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
-
-//! Substrate client as Substrate messages target. The chain we connect to should have
-//! runtime that implements `<BridgedChainName>HeaderApi` to allow bridging with
-//! <BridgedName> chain.
-
-use crate::messages_lane::SubstrateMessageLane;
-use crate::messages_source::read_client_state;
-use crate::on_demand_headers::OnDemandHeadersRelay;
-
-use async_trait::async_trait;
-use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
-use bp_runtime::ChainId;
-use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof;
-use codec::{Decode, Encode};
-use frame_support::{traits::Instance, weights::Weight};
-use messages_relay::{
-	message_lane::{SourceHeaderIdOf, TargetHeaderIdOf},
-	message_lane_loop::{TargetClient, TargetClientState},
-};
-use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf};
-use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase};
-use sp_core::Bytes;
-use sp_runtime::{traits::Header as HeaderT, DeserializeOwned};
-use std::{marker::PhantomData, ops::RangeInclusive};
-
-/// Message receiving proof returned by the target Substrate node.
-pub type SubstrateMessagesReceivingProof<C> = (
-	UnrewardedRelayersState,
-	FromBridgedChainMessagesDeliveryProof<HashOf<C>>,
-);
-
-/// Substrate client as Substrate messages target.
-pub struct SubstrateMessagesTarget<C: Chain, P: SubstrateMessageLane, I> {
-	client: Client<C>,
-	lane: P,
-	lane_id: LaneId,
-	instance: ChainId,
-	source_to_target_headers_relay: Option<OnDemandHeadersRelay<P::SourceChain>>,
-	_phantom: PhantomData<I>,
-}
-
-impl<C: Chain, P: SubstrateMessageLane, I> SubstrateMessagesTarget<C, P, I> {
-	/// Create new Substrate headers target.
-	pub fn new(
-		client: Client<C>,
-		lane: P,
-		lane_id: LaneId,
-		instance: ChainId,
-		source_to_target_headers_relay: Option<OnDemandHeadersRelay<P::SourceChain>>,
-	) -> Self {
-		SubstrateMessagesTarget {
-			client,
-			lane,
-			lane_id,
-			instance,
-			source_to_target_headers_relay,
-			_phantom: Default::default(),
-		}
-	}
-}
-
-impl<C: Chain, P: SubstrateMessageLane, I> Clone for SubstrateMessagesTarget<C, P, I> {
-	fn clone(&self) -> Self {
-		Self {
-			client: self.client.clone(),
-			lane: self.lane.clone(),
-			lane_id: self.lane_id,
-			instance: self.instance,
-			source_to_target_headers_relay: self.source_to_target_headers_relay.clone(),
-			_phantom: Default::default(),
-		}
-	}
-}
-
-#[async_trait]
-impl<C, P, I> RelayClient for SubstrateMessagesTarget<C, P, I>
-where
-	C: Chain,
-	P: SubstrateMessageLane,
-	I: Send + Sync + Instance,
-{
-	type Error = SubstrateError;
-
-	async fn reconnect(&mut self) -> Result<(), SubstrateError> {
-		self.client.reconnect().await
-	}
-}
-
-#[async_trait]
-impl<C, P, I> TargetClient<P> for SubstrateMessagesTarget<C, P, I>
-where
-	C: Chain,
-	C::Header: DeserializeOwned,
-	C::Index: DeserializeOwned,
-	<C::Header as HeaderT>::Number: BlockNumberBase,
-	P: SubstrateMessageLane<
-		TargetChain = C,
-		MessagesReceivingProof = SubstrateMessagesReceivingProof<C>,
-		TargetHeaderNumber = <C::Header as HeaderT>::Number,
-		TargetHeaderHash = <C::Header as HeaderT>::Hash,
-	>,
-	P::SourceChain: Chain<Hash = P::SourceHeaderHash, BlockNumber = P::SourceHeaderNumber>,
-	P::SourceHeaderNumber: Decode,
-	P::SourceHeaderHash: Decode,
-	I: Send + Sync + Instance,
-{
-	async fn state(&self) -> Result<TargetClientState<P>, SubstrateError> {
-		// we can't continue to deliver messages if target node is out of sync, because
-		// it may have already received (some of) messages that we're going to deliver
-		self.client.ensure_synced().await?;
-
-		read_client_state::<_, P::SourceHeaderHash, P::SourceHeaderNumber>(
-			&self.client,
-			P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET,
-		)
-		.await
-	}
-
-	async fn latest_received_nonce(
-		&self,
-		id: TargetHeaderIdOf<P>,
-	) -> Result<(TargetHeaderIdOf<P>, MessageNonce), SubstrateError> {
-		let encoded_response = self
-			.client
-			.state_call(
-				P::INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(),
-				Bytes(self.lane_id.encode()),
-				Some(id.1),
-			)
-			.await?;
-		let latest_received_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
-		Ok((id, latest_received_nonce))
-	}
-
-	async fn latest_confirmed_received_nonce(
-		&self,
-		id: TargetHeaderIdOf<P>,
-	) -> Result<(TargetHeaderIdOf<P>, MessageNonce), SubstrateError> {
-		let encoded_response = self
-			.client
-			.state_call(
-				P::INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD.into(),
-				Bytes(self.lane_id.encode()),
-				Some(id.1),
-			)
-			.await?;
-		let latest_received_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
-		Ok((id, latest_received_nonce))
-	}
-
-	async fn unrewarded_relayers_state(
-		&self,
-		id: TargetHeaderIdOf<P>,
-	) -> Result<(TargetHeaderIdOf<P>, UnrewardedRelayersState), SubstrateError> {
-		let encoded_response = self
-			.client
-			.state_call(
-				P::INBOUND_LANE_UNREWARDED_RELAYERS_STATE.into(),
-				Bytes(self.lane_id.encode()),
-				Some(id.1),
-			)
-			.await?;
-		let unrewarded_relayers_state: UnrewardedRelayersState =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
-		Ok((id, unrewarded_relayers_state))
-	}
-
-	async fn prove_messages_receiving(
-		&self,
-		id: TargetHeaderIdOf<P>,
-	) -> Result<(TargetHeaderIdOf<P>, P::MessagesReceivingProof), SubstrateError> {
-		let (id, relayers_state) = self.unrewarded_relayers_state(id).await?;
-		let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key::<I>(&self.lane_id);
-		let proof = self
-			.client
-			.prove_storage(vec![inbound_data_key], id.1)
-			.await?
-			.iter_nodes()
-			.collect();
-		let proof = FromBridgedChainMessagesDeliveryProof {
-			bridged_header_hash: id.1,
-			storage_proof: proof,
-			lane: self.lane_id,
-		};
-		Ok((id, (relayers_state, proof)))
-	}
-
-	async fn submit_messages_proof(
-		&self,
-		generated_at_header: SourceHeaderIdOf<P>,
-		nonces: RangeInclusive<MessageNonce>,
-		proof: P::MessagesProof,
-	) -> Result<RangeInclusive<MessageNonce>, SubstrateError> {
-		self.client
-			.submit_signed_extrinsic(self.lane.target_transactions_author(), |transaction_nonce| {
-				self.lane.make_messages_delivery_transaction(
-					transaction_nonce,
-					generated_at_header,
-					nonces.clone(),
-					proof,
-				)
-			})
-			.await?;
-		Ok(nonces)
-	}
-
-	async fn require_source_header_on_target(&self, id: SourceHeaderIdOf<P>) {
-		if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay {
-			source_to_target_headers_relay.require_finalized_header(id).await;
-		}
-	}
-
-	async fn estimate_delivery_transaction_in_source_tokens(
-		&self,
-		_nonces: RangeInclusive<MessageNonce>,
-		_total_dispatch_weight: Weight,
-		_total_size: u32,
-	) -> P::SourceChainBalance {
-		num_traits::Zero::zero() // TODO: https://github.com/paritytech/parity-bridges-common/issues/997
-	}
-}
diff --git a/polkadot/bridges/relays/client-ethereum/Cargo.toml b/polkadot/bridges/relays/client-ethereum/Cargo.toml
index 64a76a6b5dae3ccd4507d414f51293b711fc44d4..171988a32533b21326e6c72a0751b246f136c712 100644
--- a/polkadot/bridges/relays/client-ethereum/Cargo.toml
+++ b/polkadot/bridges/relays/client-ethereum/Cargo.toml
@@ -8,12 +8,13 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 [dependencies]
 async-std = "1.6.5"
 bp-eth-poa = { path = "../../primitives/ethereum-poa" }
-codec = { package = "parity-scale-codec", version = "2.0.0" }
 headers-relay = { path = "../headers" }
 hex-literal = "0.3"
-jsonrpsee-proc-macros = "=0.2.0-alpha.6"
-jsonrpsee-ws-client = "=0.2.0-alpha.6"
-libsecp256k1 = { version = "0.3.4", default-features = false, features = ["hmac"] }
+jsonrpsee-proc-macros = "0.3.1"
+jsonrpsee-ws-client = "0.3.1"
+libsecp256k1 = { version = "0.7", default-features = false, features = ["hmac"] }
 log = "0.4.11"
 relay-utils = { path = "../utils" }
-web3 = { version = "0.15", git = "https://github.com/tomusdrw/rust-web3", branch ="td-ethabi", default-features = false }
+tokio = "1.8"
+web3 = { git = "https://github.com/svyatonik/rust-web3.git", branch = "bump-deps" }
+thiserror = "1.0.26"
diff --git a/polkadot/bridges/relays/client-ethereum/src/client.rs b/polkadot/bridges/relays/client-ethereum/src/client.rs
index 71dac5df6d481aca302cb8ef14fbec920c11e8a6..48b7c9386f353a62a4d4388862d4c930a7c02c3e 100644
--- a/polkadot/bridges/relays/client-ethereum/src/client.rs
+++ b/polkadot/bridges/relays/client-ethereum/src/client.rs
@@ -14,16 +14,18 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::rpc::Ethereum;
-use crate::types::{
-	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx, SyncState, Transaction,
-	TransactionHash, H256, U256,
+use crate::{
+	rpc::Ethereum,
+	types::{
+		Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SignedRawTx,
+		SyncState, Transaction, TransactionHash, H256, U256,
+	},
+	ConnectionParams, Error, Result,
 };
-use crate::{ConnectionParams, Error, Result};
 
 use jsonrpsee_ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder};
 use relay_utils::relay_loop::RECONNECT_DELAY;
-use std::sync::Arc;
+use std::{future::Future, sync::Arc};
 
 /// Number of headers missing from the Ethereum node for us to consider node not synced.
 const MAJOR_SYNC_BLOCKS: u64 = 5;
@@ -31,6 +33,7 @@ const MAJOR_SYNC_BLOCKS: u64 = 5;
 /// The client used to interact with an Ethereum node through RPC.
 #[derive(Clone)]
 pub struct Client {
+	tokio: Arc<tokio::runtime::Runtime>,
 	params: ConnectionParams,
 	client: Arc<RpcClient>,
 }
@@ -56,25 +59,30 @@ impl Client {
 		}
 	}
 
-	/// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been established
-	/// or error otherwise.
+	/// Try to connect to Ethereum node. Returns Ethereum RPC client if connection has been
+	/// established or error otherwise.
 	pub async fn try_connect(params: ConnectionParams) -> Result<Self> {
-		Ok(Self {
-			client: Self::build_client(&params).await?,
-			params,
-		})
+		let (tokio, client) = Self::build_client(&params).await?;
+		Ok(Self { tokio, client, params })
 	}
 
 	/// Build client to use in connection.
-	async fn build_client(params: &ConnectionParams) -> Result<Arc<RpcClient>> {
+	async fn build_client(
+		params: &ConnectionParams,
+	) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
+		let tokio = tokio::runtime::Runtime::new()?;
 		let uri = format!("ws://{}:{}", params.host, params.port);
-		let client = RpcClientBuilder::default().build(&uri).await?;
-		Ok(Arc::new(client))
+		let client = tokio
+			.spawn(async move { RpcClientBuilder::default().build(&uri).await })
+			.await??;
+		Ok((Arc::new(tokio), Arc::new(client)))
 	}
 
 	/// Reopen client connection.
 	pub async fn reconnect(&mut self) -> Result<()> {
-		self.client = Self::build_client(&self.params).await?;
+		let (tokio, client) = Self::build_client(&self.params).await?;
+		self.tokio = tokio;
+		self.client = client;
 		Ok(())
 	}
 }
@@ -82,113 +90,176 @@ impl Client {
 impl Client {
 	/// Returns true if client is connected to at least one peer and is in synced state.
 	pub async fn ensure_synced(&self) -> Result<()> {
-		match Ethereum::syncing(&*self.client).await? {
-			SyncState::NotSyncing => Ok(()),
-			SyncState::Syncing(syncing) => {
-				let missing_headers = syncing.highest_block.saturating_sub(syncing.current_block);
-				if missing_headers > MAJOR_SYNC_BLOCKS.into() {
-					return Err(Error::ClientNotSynced(missing_headers));
-				}
-
-				Ok(())
+		self.jsonrpsee_execute(move |client| async move {
+			match Ethereum::syncing(&*client).await? {
+				SyncState::NotSyncing => Ok(()),
+				SyncState::Syncing(syncing) => {
+					let missing_headers =
+						syncing.highest_block.saturating_sub(syncing.current_block);
+					if missing_headers > MAJOR_SYNC_BLOCKS.into() {
+						return Err(Error::ClientNotSynced(missing_headers))
+					}
+
+					Ok(())
+				},
 			}
-		}
+		})
+		.await
 	}
 
 	/// Estimate gas usage for the given call.
 	pub async fn estimate_gas(&self, call_request: CallRequest) -> Result<U256> {
-		Ok(Ethereum::estimate_gas(&*self.client, call_request).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::estimate_gas(&*client, call_request).await?)
+		})
+		.await
 	}
 
 	/// Retrieve number of the best known block from the Ethereum node.
 	pub async fn best_block_number(&self) -> Result<u64> {
-		Ok(Ethereum::block_number(&*self.client).await?.as_u64())
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::block_number(&*client).await?.as_u64())
+		})
+		.await
 	}
 
 	/// Retrieve number of the best known block from the Ethereum node.
 	pub async fn header_by_number(&self, block_number: u64) -> Result<Header> {
-		let get_full_tx_objects = false;
-		let header = Ethereum::get_block_by_number(&*self.client, block_number, get_full_tx_objects).await?;
-		match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
-			true => Ok(header),
-			false => Err(Error::IncompleteHeader),
-		}
+		self.jsonrpsee_execute(move |client| async move {
+			let get_full_tx_objects = false;
+			let header =
+				Ethereum::get_block_by_number(&*client, block_number, get_full_tx_objects).await?;
+			match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
+				true => Ok(header),
+				false => Err(Error::IncompleteHeader),
+			}
+		})
+		.await
 	}
 
 	/// Retrieve block header by its hash from Ethereum node.
 	pub async fn header_by_hash(&self, hash: H256) -> Result<Header> {
-		let get_full_tx_objects = false;
-		let header = Ethereum::get_block_by_hash(&*self.client, hash, get_full_tx_objects).await?;
-		match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
-			true => Ok(header),
-			false => Err(Error::IncompleteHeader),
-		}
+		self.jsonrpsee_execute(move |client| async move {
+			let get_full_tx_objects = false;
+			let header = Ethereum::get_block_by_hash(&*client, hash, get_full_tx_objects).await?;
+			match header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some() {
+				true => Ok(header),
+				false => Err(Error::IncompleteHeader),
+			}
+		})
+		.await
 	}
 
 	/// Retrieve block header and its transactions by its number from Ethereum node.
-	pub async fn header_by_number_with_transactions(&self, number: u64) -> Result<HeaderWithTransactions> {
-		let get_full_tx_objects = true;
-		let header =
-			Ethereum::get_block_by_number_with_transactions(&*self.client, number, get_full_tx_objects).await?;
-
-		let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
-		if !is_complete_header {
-			return Err(Error::IncompleteHeader);
-		}
+	pub async fn header_by_number_with_transactions(
+		&self,
+		number: u64,
+	) -> Result<HeaderWithTransactions> {
+		self.jsonrpsee_execute(move |client| async move {
+			let get_full_tx_objects = true;
+			let header = Ethereum::get_block_by_number_with_transactions(
+				&*client,
+				number,
+				get_full_tx_objects,
+			)
+			.await?;
 
-		let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
-		if !is_complete_transactions {
-			return Err(Error::IncompleteTransaction);
-		}
+			let is_complete_header =
+				header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
+			if !is_complete_header {
+				return Err(Error::IncompleteHeader)
+			}
 
-		Ok(header)
+			let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
+			if !is_complete_transactions {
+				return Err(Error::IncompleteTransaction)
+			}
+
+			Ok(header)
+		})
+		.await
 	}
 
 	/// Retrieve block header and its transactions by its hash from Ethereum node.
-	pub async fn header_by_hash_with_transactions(&self, hash: H256) -> Result<HeaderWithTransactions> {
-		let get_full_tx_objects = true;
-		let header = Ethereum::get_block_by_hash_with_transactions(&*self.client, hash, get_full_tx_objects).await?;
+	pub async fn header_by_hash_with_transactions(
+		&self,
+		hash: H256,
+	) -> Result<HeaderWithTransactions> {
+		self.jsonrpsee_execute(move |client| async move {
+			let get_full_tx_objects = true;
+			let header =
+				Ethereum::get_block_by_hash_with_transactions(&*client, hash, get_full_tx_objects)
+					.await?;
 
-		let is_complete_header = header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
-		if !is_complete_header {
-			return Err(Error::IncompleteHeader);
-		}
+			let is_complete_header =
+				header.number.is_some() && header.hash.is_some() && header.logs_bloom.is_some();
+			if !is_complete_header {
+				return Err(Error::IncompleteHeader)
+			}
 
-		let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
-		if !is_complete_transactions {
-			return Err(Error::IncompleteTransaction);
-		}
+			let is_complete_transactions = header.transactions.iter().all(|tx| tx.raw.is_some());
+			if !is_complete_transactions {
+				return Err(Error::IncompleteTransaction)
+			}
 
-		Ok(header)
+			Ok(header)
+		})
+		.await
 	}
 
 	/// Retrieve transaction by its hash from Ethereum node.
 	pub async fn transaction_by_hash(&self, hash: H256) -> Result<Option<Transaction>> {
-		Ok(Ethereum::transaction_by_hash(&*self.client, hash).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::transaction_by_hash(&*client, hash).await?)
+		})
+		.await
 	}
 
 	/// Retrieve transaction receipt by transaction hash.
 	pub async fn transaction_receipt(&self, transaction_hash: H256) -> Result<Receipt> {
-		Ok(Ethereum::get_transaction_receipt(&*self.client, transaction_hash).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::get_transaction_receipt(&*client, transaction_hash).await?)
+		})
+		.await
 	}
 
 	/// Get the nonce of the given account.
 	pub async fn account_nonce(&self, address: Address) -> Result<U256> {
-		Ok(Ethereum::get_transaction_count(&*self.client, address).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::get_transaction_count(&*client, address).await?)
+		})
+		.await
 	}
 
 	/// Submit an Ethereum transaction.
 	///
 	/// The transaction must already be signed before sending it through this method.
 	pub async fn submit_transaction(&self, signed_raw_tx: SignedRawTx) -> Result<TransactionHash> {
-		let transaction = Bytes(signed_raw_tx);
-		let tx_hash = Ethereum::submit_transaction(&*self.client, transaction).await?;
-		log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash);
-		Ok(tx_hash)
+		self.jsonrpsee_execute(move |client| async move {
+			let transaction = Bytes(signed_raw_tx);
+			let tx_hash = Ethereum::submit_transaction(&*client, transaction).await?;
+			log::trace!(target: "bridge", "Sent transaction to Ethereum node: {:?}", tx_hash);
+			Ok(tx_hash)
+		})
+		.await
 	}
 
 	/// Call Ethereum smart contract.
 	pub async fn eth_call(&self, call_transaction: CallRequest) -> Result<Bytes> {
-		Ok(Ethereum::call(&*self.client, call_transaction).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Ethereum::call(&*client, call_transaction).await?)
+		})
+		.await
+	}
+
+	/// Execute jsonrpsee future in tokio context.
+	async fn jsonrpsee_execute<MF, F, T>(&self, make_jsonrpsee_future: MF) -> Result<T>
+	where
+		MF: FnOnce(Arc<RpcClient>) -> F + Send + 'static,
+		F: Future<Output = Result<T>> + Send,
+		T: Send + 'static,
+	{
+		let client = self.client.clone();
+		self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await?
 	}
 }
diff --git a/polkadot/bridges/relays/client-ethereum/src/error.rs b/polkadot/bridges/relays/client-ethereum/src/error.rs
index bcd8edc3f33ae5dc38a753003f83609f6590d676..6323b708fc0244881c727fb4e05e5b8716a8002c 100644
--- a/polkadot/bridges/relays/client-ethereum/src/error.rs
+++ b/polkadot/bridges/relays/client-ethereum/src/error.rs
@@ -18,38 +18,52 @@
 
 use crate::types::U256;
 
-use jsonrpsee_ws_client::Error as RpcError;
+use jsonrpsee_ws_client::types::Error as RpcError;
 use relay_utils::MaybeConnectionError;
+use thiserror::Error;
 
 /// Result type used by Ethereum client.
 pub type Result<T> = std::result::Result<T, Error>;
 
 /// Errors that can occur only when interacting with
 /// an Ethereum node through RPC.
-#[derive(Debug)]
+#[derive(Debug, Error)]
 pub enum Error {
+	/// IO error.
+	#[error("IO error: {0}")]
+	Io(#[from] std::io::Error),
 	/// An error that can occur when making an HTTP request to
 	/// an JSON-RPC client.
-	RpcError(RpcError),
+	#[error("RPC error: {0}")]
+	RpcError(#[from] RpcError),
 	/// Failed to parse response.
+	#[error("Response parse failed: {0}")]
 	ResponseParseFailed(String),
 	/// We have received a header with missing fields.
+	#[error("Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom).")]
 	IncompleteHeader,
 	/// We have received a transaction missing a `raw` field.
+	#[error("Incomplete Ethereum Transaction (missing required field - raw).")]
 	IncompleteTransaction,
 	/// An invalid Substrate block number was received from
 	/// an Ethereum node.
+	#[error("Received an invalid Substrate block from Ethereum Node.")]
 	InvalidSubstrateBlockNumber,
 	/// An invalid index has been received from an Ethereum node.
+	#[error("Received an invalid incomplete index from Ethereum Node.")]
 	InvalidIncompleteIndex,
 	/// The client we're connected to is not synced, so we can't rely on its state. Contains
 	/// number of unsynced headers.
+	#[error("Ethereum client is not synced: syncing {0} headers.")]
 	ClientNotSynced(U256),
+	/// Custom logic error.
+	#[error("{0}")]
+	Custom(String),
 }
 
-impl From<RpcError> for Error {
-	fn from(error: RpcError) -> Self {
-		Error::RpcError(error)
+impl From<tokio::task::JoinError> for Error {
+	fn from(error: tokio::task::JoinError) -> Self {
+		Error::Custom(format!("Failed to wait tokio task: {}", error))
 	}
 }
 
@@ -57,30 +71,12 @@ impl MaybeConnectionError for Error {
 	fn is_connection_error(&self) -> bool {
 		matches!(
 			*self,
-			Error::RpcError(RpcError::TransportError(_))
+			Error::RpcError(RpcError::Transport(_))
 				// right now if connection to the ws server is dropped (after it is already established),
 				// we're getting this error
 				| Error::RpcError(RpcError::Internal(_))
+				| Error::RpcError(RpcError::RestartNeeded(_))
 				| Error::ClientNotSynced(_),
 		)
 	}
 }
-
-impl ToString for Error {
-	fn to_string(&self) -> String {
-		match self {
-			Self::RpcError(e) => e.to_string(),
-			Self::ResponseParseFailed(e) => e.to_string(),
-			Self::IncompleteHeader => {
-				"Incomplete Ethereum Header Received (missing some of required fields - hash, number, logs_bloom)"
-					.to_string()
-			}
-			Self::IncompleteTransaction => "Incomplete Ethereum Transaction (missing required field - raw)".to_string(),
-			Self::InvalidSubstrateBlockNumber => "Received an invalid Substrate block from Ethereum Node".to_string(),
-			Self::InvalidIncompleteIndex => "Received an invalid incomplete index from Ethereum Node".to_string(),
-			Self::ClientNotSynced(missing_headers) => {
-				format!("Ethereum client is not synced: syncing {} headers", missing_headers)
-			}
-		}
-	}
-}
diff --git a/polkadot/bridges/relays/client-ethereum/src/lib.rs b/polkadot/bridges/relays/client-ethereum/src/lib.rs
index 8b3c6d8f8e733198ede31cc4de9aa95c5d41ea18..fa4877f8e5cfc28b5a47496756463bb4446f02bf 100644
--- a/polkadot/bridges/relays/client-ethereum/src/lib.rs
+++ b/polkadot/bridges/relays/client-ethereum/src/lib.rs
@@ -23,16 +23,18 @@ mod error;
 mod rpc;
 mod sign;
 
-pub use crate::client::Client;
-pub use crate::error::{Error, Result};
-pub use crate::sign::{sign_and_submit_transaction, SigningParams};
+pub use crate::{
+	client::Client,
+	error::{Error, Result},
+	sign::{sign_and_submit_transaction, SigningParams},
+};
 
 pub mod types;
 
 /// Ethereum-over-websocket connection params.
 #[derive(Debug, Clone)]
 pub struct ConnectionParams {
-	/// Websocket server hostname.
+	/// Websocket server host name.
 	pub host: String,
 	/// Websocket server TCP port.
 	pub port: u16,
@@ -40,9 +42,6 @@ pub struct ConnectionParams {
 
 impl Default for ConnectionParams {
 	fn default() -> Self {
-		ConnectionParams {
-			host: "localhost".into(),
-			port: 8546,
-		}
+		ConnectionParams { host: "localhost".into(), port: 8546 }
 	}
 }
diff --git a/polkadot/bridges/relays/client-ethereum/src/rpc.rs b/polkadot/bridges/relays/client-ethereum/src/rpc.rs
index 0fb81f7655a4622acdd0de4ae365f9cc67a4c15b..2479338b1015cac37652119554ff98a5a43737a5 100644
--- a/polkadot/bridges/relays/client-ethereum/src/rpc.rs
+++ b/polkadot/bridges/relays/client-ethereum/src/rpc.rs
@@ -17,8 +17,8 @@
 //! Ethereum node RPC interface.
 
 use crate::types::{
-	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction, TransactionHash,
-	H256, U256, U64,
+	Address, Bytes, CallRequest, Header, HeaderWithTransactions, Receipt, SyncState, Transaction,
+	TransactionHash, H256, U256, U64,
 };
 
 jsonrpsee_proc_macros::rpc_client_api! {
diff --git a/polkadot/bridges/relays/client-ethereum/src/sign.rs b/polkadot/bridges/relays/client-ethereum/src/sign.rs
index 6f479ab7d5cd4c8d4984593e5047e0c444326dda..86ddcc871c40e155d20c4ab1b8a535713ccba770 100644
--- a/polkadot/bridges/relays/client-ethereum/src/sign.rs
+++ b/polkadot/bridges/relays/client-ethereum/src/sign.rs
@@ -14,11 +14,13 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::types::{Address, CallRequest, U256};
-use crate::{Client, Result};
+use crate::{
+	types::{Address, CallRequest, U256},
+	Client, Result,
+};
 use bp_eth_poa::signatures::{secret_to_address, SignTransaction};
 use hex_literal::hex;
-use secp256k1::SecretKey;
+use libsecp256k1::SecretKey;
 
 /// Ethereum signing params.
 #[derive(Clone, Debug)]
@@ -47,7 +49,7 @@ impl Default for SigningParams {
 	}
 }
 
-/// Sign and submit tranaction using given Ethereum client.
+/// Sign and submit transaction using given Ethereum client.
 pub async fn sign_and_submit_transaction(
 	client: &Client,
 	params: &SigningParams,
diff --git a/polkadot/bridges/relays/client-kusama/Cargo.toml b/polkadot/bridges/relays/client-kusama/Cargo.toml
index b9c397bca6c0198eaf5b162570804d80b67cd09a..a48d82f641b701f2907c75f436f039f7589d373e 100644
--- a/polkadot/bridges/relays/client-kusama/Cargo.toml
+++ b/polkadot/bridges/relays/client-kusama/Cargo.toml
@@ -6,20 +6,25 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
+scale-info = { version = "1.0", features = ["derive"] }
 
 # Bridge dependencies
 
+bp-header-chain = { path = "../../primitives/header-chain" }
 bp-kusama = { path = "../../primitives/chain-kusama" }
+bp-message-dispatch = { path = "../../primitives/message-dispatch" }
+bp-messages = { path = "../../primitives/messages" }
+bp-polkadot = { path = "../../primitives/chain-polkadot" }
+bp-polkadot-core = { path = "../../primitives/polkadot-core" }
+bp-runtime = { path = "../../primitives/runtime" }
+bridge-runtime-common = { path = "../../bin/runtime-common" }
+pallet-bridge-dispatch = { path = "../../modules/dispatch" }
 
 # Substrate Dependencies
 
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-kusama/src/lib.rs b/polkadot/bridges/relays/client-kusama/src/lib.rs
index f2fba32dc1ed02f03ced2188d7905c542e29f9d1..a93726620ff61924e4457ad90da6a2623b2e1ef1 100644
--- a/polkadot/bridges/relays/client-kusama/src/lib.rs
+++ b/polkadot/bridges/relays/client-kusama/src/lib.rs
@@ -16,9 +16,17 @@
 
 //! Types used to connect to the Kusama chain.
 
-use relay_substrate_client::{Chain, ChainBase};
+use codec::Encode;
+use relay_substrate_client::{
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
+};
+use sp_core::{storage::StorageKey, Pair};
+use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
 use std::time::Duration;
 
+pub mod runtime;
+
 /// Kusama header id.
 pub type HeaderId = relay_utils::HeaderId<bp_kusama::Hash, bp_kusama::BlockNumber>;
 
@@ -31,18 +39,86 @@ impl ChainBase for Kusama {
 	type Hash = bp_kusama::Hash;
 	type Hasher = bp_kusama::Hasher;
 	type Header = bp_kusama::Header;
+
+	type AccountId = bp_kusama::AccountId;
+	type Balance = bp_kusama::Balance;
+	type Index = bp_kusama::Nonce;
+	type Signature = bp_kusama::Signature;
 }
 
 impl Chain for Kusama {
 	const NAME: &'static str = "Kusama";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_kusama::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_kusama::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = bp_kusama::AccountId;
-	type Index = bp_kusama::Nonce;
 	type SignedBlock = bp_kusama::SignedBlock;
-	type Call = ();
-	type Balance = bp_kusama::Balance;
+	type Call = crate::runtime::Call;
+	type WeightToFee = bp_kusama::WeightToFee;
+}
+
+impl ChainWithBalances for Kusama {
+	fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey {
+		StorageKey(bp_kusama::account_info_storage_key(account_id))
+	}
+}
+
+impl TransactionSignScheme for Kusama {
+	type Chain = Kusama;
+	type AccountKeyPair = sp_core::sr25519::Pair;
+	type SignedTransaction = crate::runtime::UncheckedExtrinsic;
+
+	fn sign_transaction(
+		genesis_hash: <Self::Chain as ChainBase>::Hash,
+		signer: &Self::AccountKeyPair,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
+	) -> Self::SignedTransaction {
+		let raw_payload = SignedPayload::new(
+			unsigned.call,
+			bp_kusama::SignedExtensions::new(
+				bp_kusama::VERSION,
+				era,
+				genesis_hash,
+				unsigned.nonce,
+				unsigned.tip,
+			),
+		)
+		.expect("SignedExtension never fails.");
+
+		let signature = raw_payload.using_encoded(|payload| signer.sign(payload));
+		let signer: sp_runtime::MultiSigner = signer.public().into();
+		let (call, extra, _) = raw_payload.deconstruct();
+
+		bp_kusama::UncheckedExtrinsic::new_signed(
+			call,
+			sp_runtime::MultiAddress::Id(signer.into_account()),
+			signature.into(),
+			extra,
+		)
+	}
+
+	fn is_signed(tx: &Self::SignedTransaction) -> bool {
+		tx.signature.is_some()
+	}
+
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
+		tx.signature
+			.as_ref()
+			.map(|(address, _, _)| {
+				*address == bp_kusama::AccountId::from(*signer.public().as_array_ref()).into()
+			})
+			.unwrap_or(false)
+	}
+
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
+		let extra = &tx.signature.as_ref()?.2;
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
+	}
 }
 
 /// Kusama header type used in headers sync.
 pub type SyncHeader = relay_substrate_client::SyncHeader<bp_kusama::Header>;
+
+/// Kusama signing params.
+pub type SigningParams = sp_core::sr25519::Pair;
diff --git a/polkadot/bridges/relays/client-kusama/src/runtime.rs b/polkadot/bridges/relays/client-kusama/src/runtime.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6d0ab5462d7c8418ca8c71ea2f5815762143df4c
--- /dev/null
+++ b/polkadot/bridges/relays/client-kusama/src/runtime.rs
@@ -0,0 +1,154 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Types that are specific to the Kusama runtime.
+
+use bp_messages::{LaneId, UnrewardedRelayersState};
+use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike};
+use bp_runtime::Chain;
+use codec::{Compact, Decode, Encode};
+use frame_support::weights::Weight;
+use scale_info::TypeInfo;
+use sp_runtime::FixedU128;
+
+/// Unchecked Kusama extrinsic.
+pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
+
+/// Polkadot account ownership digest from Kusama.
+///
+/// The byte vector returned by this function should be signed with a Polkadot account private key.
+/// This way, the owner of `kusama_account_id` on Kusama proves that the Polkadot account private
+/// key is also under his control.
+pub fn kusama_to_polkadot_account_ownership_digest<Call, AccountId, SpecVersion>(
+	polkadot_call: &Call,
+	kusama_account_id: AccountId,
+	polkadot_spec_version: SpecVersion,
+) -> Vec<u8>
+where
+	Call: codec::Encode,
+	AccountId: codec::Encode,
+	SpecVersion: codec::Encode,
+{
+	pallet_bridge_dispatch::account_ownership_digest(
+		polkadot_call,
+		kusama_account_id,
+		polkadot_spec_version,
+		bp_runtime::KUSAMA_CHAIN_ID,
+		bp_runtime::POLKADOT_CHAIN_ID,
+	)
+}
+
+/// Kusama Runtime `Call` enum.
+///
+/// The enum represents a subset of possible `Call`s we can send to Kusama chain.
+/// Ideally this code would be auto-generated from metadata, because we want to
+/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
+///
+/// All entries here (like pretty much in the entire file) must be kept in sync with Kusama
+/// `construct_runtime`, so that we maintain SCALE-compatibility.
+///
+/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/kusama/src/lib.rs)
+#[allow(clippy::large_enum_variant)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+pub enum Call {
+	/// System pallet.
+	#[codec(index = 0)]
+	System(SystemCall),
+	/// Balances pallet.
+	#[codec(index = 4)]
+	Balances(BalancesCall),
+	/// Polkadot bridge pallet.
+	#[codec(index = 110)]
+	BridgePolkadotGrandpa(BridgePolkadotGrandpaCall),
+	/// Polkadot messages pallet.
+	#[codec(index = 111)]
+	BridgePolkadotMessages(BridgePolkadotMessagesCall),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum SystemCall {
+	#[codec(index = 1)]
+	remark(Vec<u8>),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum BalancesCall {
+	#[codec(index = 0)]
+	transfer(AccountAddress, Compact<Balance>),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum BridgePolkadotGrandpaCall {
+	#[codec(index = 0)]
+	submit_finality_proof(
+		Box<<PolkadotLike as Chain>::Header>,
+		bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
+	),
+	#[codec(index = 1)]
+	initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum BridgePolkadotMessagesCall {
+	#[codec(index = 2)]
+	update_pallet_parameter(BridgePolkadotMessagesParameter),
+	#[codec(index = 3)]
+	send_message(
+		LaneId,
+		bp_message_dispatch::MessagePayload<
+			bp_kusama::AccountId,
+			bp_polkadot::AccountId,
+			bp_polkadot::AccountPublic,
+			Vec<u8>,
+		>,
+		bp_kusama::Balance,
+	),
+	#[codec(index = 5)]
+	receive_messages_proof(
+		bp_polkadot::AccountId,
+		bridge_runtime_common::messages::target::FromBridgedChainMessagesProof<bp_polkadot::Hash>,
+		u32,
+		Weight,
+	),
+	#[codec(index = 6)]
+	receive_messages_delivery_proof(
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_polkadot::Hash,
+		>,
+		UnrewardedRelayersState,
+	),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+pub enum BridgePolkadotMessagesParameter {
+	#[codec(index = 0)]
+	PolkadotToKusamaConversionRate(FixedU128),
+}
+
+impl sp_runtime::traits::Dispatchable for Call {
+	type Origin = ();
+	type Config = ();
+	type Info = ();
+	type PostInfo = ();
+
+	fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo<Self::PostInfo> {
+		unimplemented!("The Call is not expected to be dispatched.")
+	}
+}
diff --git a/polkadot/bridges/relays/client-millau/Cargo.toml b/polkadot/bridges/relays/client-millau/Cargo.toml
index e16f06f8528b674e0466986431d05bf5a2de55bc..49d9dade154c2d2cb6321b926eccad48459a87cb 100644
--- a/polkadot/bridges/relays/client-millau/Cargo.toml
+++ b/polkadot/bridges/relays/client-millau/Cargo.toml
@@ -6,13 +6,13 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
 
 # Supported Chains
 
+bp-millau = { path = "../../primitives/chain-millau" }
 millau-runtime = { path = "../../bin/millau/runtime" }
 
 # Substrate Dependencies
@@ -21,5 +21,4 @@ frame-support = { git = "https://github.com/paritytech/substrate", branch = "mas
 frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
 pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-millau/src/lib.rs b/polkadot/bridges/relays/client-millau/src/lib.rs
index 8597d9e59200f26d8e9b4958cf39751e8f5e2365..3f1aba1f3b372493b26d6217f206dbac66695bda 100644
--- a/polkadot/bridges/relays/client-millau/src/lib.rs
+++ b/polkadot/bridges/relays/client-millau/src/lib.rs
@@ -16,8 +16,11 @@
 
 //! Types used to connect to the Millau-Substrate chain.
 
-use codec::Encode;
-use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme};
+use codec::{Compact, Decode, Encode};
+use relay_substrate_client::{
+	BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf,
+	TransactionSignScheme, UnsignedTransaction,
+};
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
 use std::time::Duration;
@@ -34,17 +37,22 @@ impl ChainBase for Millau {
 	type Hash = millau_runtime::Hash;
 	type Hasher = millau_runtime::Hashing;
 	type Header = millau_runtime::Header;
+
+	type AccountId = millau_runtime::AccountId;
+	type Balance = millau_runtime::Balance;
+	type Index = millau_runtime::Index;
+	type Signature = millau_runtime::Signature;
 }
 
 impl Chain for Millau {
 	const NAME: &'static str = "Millau";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_millau::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = millau_runtime::AccountId;
-	type Index = millau_runtime::Index;
 	type SignedBlock = millau_runtime::SignedBlock;
 	type Call = millau_runtime::Call;
-	type Balance = millau_runtime::Balance;
+	type WeightToFee = bp_millau::WeightToFee;
 }
 
 impl ChainWithBalances for Millau {
@@ -64,25 +72,25 @@ impl TransactionSignScheme for Millau {
 	fn sign_transaction(
 		genesis_hash: <Self::Chain as ChainBase>::Hash,
 		signer: &Self::AccountKeyPair,
-		signer_nonce: <Self::Chain as Chain>::Index,
-		call: <Self::Chain as Chain>::Call,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::from_raw(
-			call,
+			unsigned.call,
 			(
 				frame_system::CheckSpecVersion::<millau_runtime::Runtime>::new(),
 				frame_system::CheckTxVersion::<millau_runtime::Runtime>::new(),
 				frame_system::CheckGenesis::<millau_runtime::Runtime>::new(),
-				frame_system::CheckEra::<millau_runtime::Runtime>::from(sp_runtime::generic::Era::Immortal),
-				frame_system::CheckNonce::<millau_runtime::Runtime>::from(signer_nonce),
+				frame_system::CheckEra::<millau_runtime::Runtime>::from(era.frame_era()),
+				frame_system::CheckNonce::<millau_runtime::Runtime>::from(unsigned.nonce),
 				frame_system::CheckWeight::<millau_runtime::Runtime>::new(),
-				pallet_transaction_payment::ChargeTransactionPayment::<millau_runtime::Runtime>::from(0),
+				pallet_transaction_payment::ChargeTransactionPayment::<millau_runtime::Runtime>::from(unsigned.tip),
 			),
 			(
 				millau_runtime::VERSION.spec_version,
 				millau_runtime::VERSION.transaction_version,
 				genesis_hash,
-				genesis_hash,
+				era.signed_payload(genesis_hash),
 				(),
 				(),
 				(),
@@ -92,7 +100,36 @@ impl TransactionSignScheme for Millau {
 		let signer: sp_runtime::MultiSigner = signer.public().into();
 		let (call, extra, _) = raw_payload.deconstruct();
 
-		millau_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra)
+		millau_runtime::UncheckedExtrinsic::new_signed(
+			call,
+			signer.into_account(),
+			signature.into(),
+			extra,
+		)
+	}
+
+	fn is_signed(tx: &Self::SignedTransaction) -> bool {
+		tx.signature.is_some()
+	}
+
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
+		tx.signature
+			.as_ref()
+			.map(|(address, _, _)| {
+				*address == millau_runtime::Address::from(*signer.public().as_array_ref())
+			})
+			.unwrap_or(false)
+	}
+
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
+		let extra = &tx.signature.as_ref()?.2;
+		Some(UnsignedTransaction {
+			call: tx.function,
+			nonce: Compact::<IndexOf<Self::Chain>>::decode(&mut &extra.4.encode()[..]).ok()?.into(),
+			tip: Compact::<BalanceOf<Self::Chain>>::decode(&mut &extra.6.encode()[..])
+				.ok()?
+				.into(),
+		})
 	}
 }
 
diff --git a/polkadot/bridges/relays/client-polkadot/Cargo.toml b/polkadot/bridges/relays/client-polkadot/Cargo.toml
index b148745f5a9872778d45e1bfb0ea8ea1484db90b..ff7748657941195d962df03f2f1c8374a570cff6 100644
--- a/polkadot/bridges/relays/client-polkadot/Cargo.toml
+++ b/polkadot/bridges/relays/client-polkadot/Cargo.toml
@@ -6,20 +6,25 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
+scale-info = { version = "1.0", features = ["derive"] }
 
 # Bridge dependencies
 
+bp-header-chain = { path = "../../primitives/header-chain" }
+bp-kusama = { path = "../../primitives/chain-kusama" }
+bp-message-dispatch = { path = "../../primitives/message-dispatch" }
+bp-messages = { path = "../../primitives/messages" }
 bp-polkadot = { path = "../../primitives/chain-polkadot" }
+bp-polkadot-core = { path = "../../primitives/polkadot-core" }
+bp-runtime = { path = "../../primitives/runtime" }
+bridge-runtime-common = { path = "../../bin/runtime-common" }
+pallet-bridge-dispatch = { path = "../../modules/dispatch" }
 
 # Substrate Dependencies
 
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-polkadot/src/lib.rs b/polkadot/bridges/relays/client-polkadot/src/lib.rs
index e502463187d2c2c49790e36886d76bd73ff13895..e6ceabf583e0bfa3e27ebbce9641d57340cbb94d 100644
--- a/polkadot/bridges/relays/client-polkadot/src/lib.rs
+++ b/polkadot/bridges/relays/client-polkadot/src/lib.rs
@@ -16,9 +16,17 @@
 
 //! Types used to connect to the Polkadot chain.
 
-use relay_substrate_client::{Chain, ChainBase};
+use codec::Encode;
+use relay_substrate_client::{
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
+};
+use sp_core::{storage::StorageKey, Pair};
+use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
 use std::time::Duration;
 
+pub mod runtime;
+
 /// Polkadot header id.
 pub type HeaderId = relay_utils::HeaderId<bp_polkadot::Hash, bp_polkadot::BlockNumber>;
 
@@ -31,18 +39,86 @@ impl ChainBase for Polkadot {
 	type Hash = bp_polkadot::Hash;
 	type Hasher = bp_polkadot::Hasher;
 	type Header = bp_polkadot::Header;
+
+	type AccountId = bp_polkadot::AccountId;
+	type Balance = bp_polkadot::Balance;
+	type Index = bp_polkadot::Nonce;
+	type Signature = bp_polkadot::Signature;
 }
 
 impl Chain for Polkadot {
 	const NAME: &'static str = "Polkadot";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_polkadot::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_polkadot::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = bp_polkadot::AccountId;
-	type Index = bp_polkadot::Nonce;
 	type SignedBlock = bp_polkadot::SignedBlock;
-	type Call = ();
-	type Balance = bp_polkadot::Balance;
+	type Call = crate::runtime::Call;
+	type WeightToFee = bp_polkadot::WeightToFee;
+}
+
+impl ChainWithBalances for Polkadot {
+	fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey {
+		StorageKey(bp_polkadot::account_info_storage_key(account_id))
+	}
+}
+
+impl TransactionSignScheme for Polkadot {
+	type Chain = Polkadot;
+	type AccountKeyPair = sp_core::sr25519::Pair;
+	type SignedTransaction = crate::runtime::UncheckedExtrinsic;
+
+	fn sign_transaction(
+		genesis_hash: <Self::Chain as ChainBase>::Hash,
+		signer: &Self::AccountKeyPair,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
+	) -> Self::SignedTransaction {
+		let raw_payload = SignedPayload::new(
+			unsigned.call,
+			bp_polkadot::SignedExtensions::new(
+				bp_polkadot::VERSION,
+				era,
+				genesis_hash,
+				unsigned.nonce,
+				unsigned.tip,
+			),
+		)
+		.expect("SignedExtension never fails.");
+
+		let signature = raw_payload.using_encoded(|payload| signer.sign(payload));
+		let signer: sp_runtime::MultiSigner = signer.public().into();
+		let (call, extra, _) = raw_payload.deconstruct();
+
+		bp_polkadot::UncheckedExtrinsic::new_signed(
+			call,
+			sp_runtime::MultiAddress::Id(signer.into_account()),
+			signature.into(),
+			extra,
+		)
+	}
+
+	fn is_signed(tx: &Self::SignedTransaction) -> bool {
+		tx.signature.is_some()
+	}
+
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
+		tx.signature
+			.as_ref()
+			.map(|(address, _, _)| {
+				*address == bp_polkadot::AccountId::from(*signer.public().as_array_ref()).into()
+			})
+			.unwrap_or(false)
+	}
+
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
+		let extra = &tx.signature.as_ref()?.2;
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
+	}
 }
 
 /// Polkadot header type used in headers sync.
 pub type SyncHeader = relay_substrate_client::SyncHeader<bp_polkadot::Header>;
+
+/// Polkadot signing params.
+pub type SigningParams = sp_core::sr25519::Pair;
diff --git a/polkadot/bridges/relays/client-polkadot/src/runtime.rs b/polkadot/bridges/relays/client-polkadot/src/runtime.rs
new file mode 100644
index 0000000000000000000000000000000000000000..8b125a37843c84198d919be6298a05df27520c72
--- /dev/null
+++ b/polkadot/bridges/relays/client-polkadot/src/runtime.rs
@@ -0,0 +1,154 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Types that are specific to the Polkadot runtime.
+
+use bp_messages::{LaneId, UnrewardedRelayersState};
+use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike};
+use bp_runtime::Chain;
+use codec::{Compact, Decode, Encode};
+use frame_support::weights::Weight;
+use scale_info::TypeInfo;
+use sp_runtime::FixedU128;
+
+/// Unchecked Polkadot extrinsic.
+pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
+
+/// Kusama account ownership digest from Polkadot.
+///
+/// The byte vector returned by this function should be signed with a Kusama account private key.
+/// This way, the owner of `kusama_account_id` on Polkadot proves that the Kusama account private key
+/// is also under their control.
+pub fn polkadot_to_kusama_account_ownership_digest<Call, AccountId, SpecVersion>(
+	kusama_call: &Call,
+	kusama_account_id: AccountId,
+	kusama_spec_version: SpecVersion,
+) -> Vec<u8>
+where
+	Call: codec::Encode,
+	AccountId: codec::Encode,
+	SpecVersion: codec::Encode,
+{
+	pallet_bridge_dispatch::account_ownership_digest(
+		kusama_call,
+		kusama_account_id,
+		kusama_spec_version,
+		bp_runtime::POLKADOT_CHAIN_ID,
+		bp_runtime::KUSAMA_CHAIN_ID,
+	)
+}
+
+/// Polkadot Runtime `Call` enum.
+///
+/// The enum represents a subset of possible `Call`s we can send to Polkadot chain.
+/// Ideally this code would be auto-generated from metadata, because we want to
+/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
+///
+/// All entries here (like pretty much in the entire file) must be kept in sync with Polkadot
+/// `construct_runtime`, so that we maintain SCALE-compatibility.
+///
+/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/polkadot/src/lib.rs)
+#[allow(clippy::large_enum_variant)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+pub enum Call {
+	/// System pallet.
+	#[codec(index = 0)]
+	System(SystemCall),
+	/// Balances pallet.
+	#[codec(index = 5)]
+	Balances(BalancesCall),
+	/// Kusama bridge pallet.
+	#[codec(index = 110)]
+	BridgeKusamaGrandpa(BridgeKusamaGrandpaCall),
+	/// Kusama messages pallet.
+	#[codec(index = 111)]
+	BridgeKusamaMessages(BridgeKusamaMessagesCall),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum SystemCall {
+	#[codec(index = 1)]
+	remark(Vec<u8>),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum BalancesCall {
+	#[codec(index = 0)]
+	transfer(AccountAddress, Compact<Balance>),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum BridgeKusamaGrandpaCall {
+	#[codec(index = 0)]
+	submit_finality_proof(
+		Box<<PolkadotLike as Chain>::Header>,
+		bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
+	),
+	#[codec(index = 1)]
+	initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+#[allow(non_camel_case_types)]
+pub enum BridgeKusamaMessagesCall {
+	#[codec(index = 2)]
+	update_pallet_parameter(BridgeKusamaMessagesParameter),
+	#[codec(index = 3)]
+	send_message(
+		LaneId,
+		bp_message_dispatch::MessagePayload<
+			bp_polkadot::AccountId,
+			bp_kusama::AccountId,
+			bp_kusama::AccountPublic,
+			Vec<u8>,
+		>,
+		bp_polkadot::Balance,
+	),
+	#[codec(index = 5)]
+	receive_messages_proof(
+		bp_kusama::AccountId,
+		bridge_runtime_common::messages::target::FromBridgedChainMessagesProof<bp_kusama::Hash>,
+		u32,
+		Weight,
+	),
+	#[codec(index = 6)]
+	receive_messages_delivery_proof(
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_kusama::Hash,
+		>,
+		UnrewardedRelayersState,
+	),
+}
+
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
+pub enum BridgeKusamaMessagesParameter {
+	#[codec(index = 0)]
+	KusamaToPolkadotConversionRate(FixedU128),
+}
+
+impl sp_runtime::traits::Dispatchable for Call {
+	type Origin = ();
+	type Config = ();
+	type Info = ();
+	type PostInfo = ();
+
+	fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo<Self::PostInfo> {
+		unimplemented!("The Call is not expected to be dispatched.")
+	}
+}
diff --git a/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml b/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..e4518c6877652fec50440ac9b432344827733a12
--- /dev/null
+++ b/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "relay-rialto-parachain-client"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[dependencies]
+relay-substrate-client = { path = "../client-substrate" }
+relay-utils = { path = "../utils" }
+
+# Bridge dependencies
+
+bp-rialto = { path = "../../primitives/chain-rialto" }
+rialto-parachain-runtime = { path = "../../bin/rialto-parachain/runtime" }
+
+# Substrate Dependencies
+
+frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs b/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..ca299a0eeb78bdbb7c11eca5859c0e7bd375a60f
--- /dev/null
+++ b/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs
@@ -0,0 +1,51 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Types used to connect to the Rialto parachain.
+
+use relay_substrate_client::{Chain, ChainBase};
+use std::time::Duration;
+
+/// Rialto parachain header id.
+pub type HeaderId =
+	relay_utils::HeaderId<rialto_parachain_runtime::Hash, rialto_parachain_runtime::BlockNumber>;
+
+/// Rialto parachain definition.
+#[derive(Debug, Clone, Copy)]
+pub struct RialtoParachain;
+
+impl ChainBase for RialtoParachain {
+	type BlockNumber = rialto_parachain_runtime::BlockNumber;
+	type Hash = rialto_parachain_runtime::Hash;
+	type Hasher = rialto_parachain_runtime::Hashing;
+	type Header = rialto_parachain_runtime::Header;
+
+	type AccountId = rialto_parachain_runtime::AccountId;
+	type Balance = rialto_parachain_runtime::Balance;
+	type Index = rialto_parachain_runtime::Index;
+	type Signature = rialto_parachain_runtime::Signature;
+}
+
+impl Chain for RialtoParachain {
+	const NAME: &'static str = "RialtoParachain";
+	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_rialto::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
+
+	type SignedBlock = rialto_parachain_runtime::SignedBlock;
+	type Call = rialto_parachain_runtime::Call;
+	type WeightToFee = bp_rialto::WeightToFee;
+}
diff --git a/polkadot/bridges/relays/client-rialto/Cargo.toml b/polkadot/bridges/relays/client-rialto/Cargo.toml
index 88e8e12add4019c56a8181891f12d65e31391e73..3132b26d27fc183e8e334747888c0bb6fcf2eb03 100644
--- a/polkadot/bridges/relays/client-rialto/Cargo.toml
+++ b/polkadot/bridges/relays/client-rialto/Cargo.toml
@@ -6,13 +6,13 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
 
 # Bridge dependencies
 
+bp-rialto = { path = "../../primitives/chain-rialto" }
 rialto-runtime = { path = "../../bin/rialto/runtime" }
 
 # Substrate Dependencies
@@ -21,5 +21,4 @@ frame-system = { git = "https://github.com/paritytech/substrate", branch = "mast
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
 pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-rialto/src/lib.rs b/polkadot/bridges/relays/client-rialto/src/lib.rs
index 4a0023a87c4f57445d5fe196d592242445b68490..42ed8bce3bd9b432726d7ba138f16668e50ebd6e 100644
--- a/polkadot/bridges/relays/client-rialto/src/lib.rs
+++ b/polkadot/bridges/relays/client-rialto/src/lib.rs
@@ -16,8 +16,11 @@
 
 //! Types used to connect to the Rialto-Substrate chain.
 
-use codec::Encode;
-use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme};
+use codec::{Compact, Decode, Encode};
+use relay_substrate_client::{
+	BalanceOf, Chain, ChainBase, ChainWithBalances, IndexOf, TransactionEraOf,
+	TransactionSignScheme, UnsignedTransaction,
+};
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
 use std::time::Duration;
@@ -34,17 +37,22 @@ impl ChainBase for Rialto {
 	type Hash = rialto_runtime::Hash;
 	type Hasher = rialto_runtime::Hashing;
 	type Header = rialto_runtime::Header;
+
+	type AccountId = rialto_runtime::AccountId;
+	type Balance = rialto_runtime::Balance;
+	type Index = rialto_runtime::Index;
+	type Signature = rialto_runtime::Signature;
 }
 
 impl Chain for Rialto {
 	const NAME: &'static str = "Rialto";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_rialto::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = rialto_runtime::AccountId;
-	type Index = rialto_runtime::Index;
 	type SignedBlock = rialto_runtime::SignedBlock;
 	type Call = rialto_runtime::Call;
-	type Balance = rialto_runtime::Balance;
+	type WeightToFee = bp_rialto::WeightToFee;
 }
 
 impl ChainWithBalances for Rialto {
@@ -64,25 +72,25 @@ impl TransactionSignScheme for Rialto {
 	fn sign_transaction(
 		genesis_hash: <Self::Chain as ChainBase>::Hash,
 		signer: &Self::AccountKeyPair,
-		signer_nonce: <Self::Chain as Chain>::Index,
-		call: <Self::Chain as Chain>::Call,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::from_raw(
-			call,
+			unsigned.call,
 			(
 				frame_system::CheckSpecVersion::<rialto_runtime::Runtime>::new(),
 				frame_system::CheckTxVersion::<rialto_runtime::Runtime>::new(),
 				frame_system::CheckGenesis::<rialto_runtime::Runtime>::new(),
-				frame_system::CheckEra::<rialto_runtime::Runtime>::from(sp_runtime::generic::Era::Immortal),
-				frame_system::CheckNonce::<rialto_runtime::Runtime>::from(signer_nonce),
+				frame_system::CheckEra::<rialto_runtime::Runtime>::from(era.frame_era()),
+				frame_system::CheckNonce::<rialto_runtime::Runtime>::from(unsigned.nonce),
 				frame_system::CheckWeight::<rialto_runtime::Runtime>::new(),
-				pallet_transaction_payment::ChargeTransactionPayment::<rialto_runtime::Runtime>::from(0),
+				pallet_transaction_payment::ChargeTransactionPayment::<rialto_runtime::Runtime>::from(unsigned.tip),
 			),
 			(
 				rialto_runtime::VERSION.spec_version,
 				rialto_runtime::VERSION.transaction_version,
 				genesis_hash,
-				genesis_hash,
+				era.signed_payload(genesis_hash),
 				(),
 				(),
 				(),
@@ -92,7 +100,34 @@ impl TransactionSignScheme for Rialto {
 		let signer: sp_runtime::MultiSigner = signer.public().into();
 		let (call, extra, _) = raw_payload.deconstruct();
 
-		rialto_runtime::UncheckedExtrinsic::new_signed(call, signer.into_account(), signature.into(), extra)
+		rialto_runtime::UncheckedExtrinsic::new_signed(
+			call,
+			signer.into_account().into(),
+			signature.into(),
+			extra,
+		)
+	}
+
+	fn is_signed(tx: &Self::SignedTransaction) -> bool {
+		tx.signature.is_some()
+	}
+
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
+		tx.signature
+			.as_ref()
+			.map(|(address, _, _)| *address == rialto_runtime::Address::Id(signer.public().into()))
+			.unwrap_or(false)
+	}
+
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
+		let extra = &tx.signature.as_ref()?.2;
+		Some(UnsignedTransaction {
+			call: tx.function,
+			nonce: Compact::<IndexOf<Self::Chain>>::decode(&mut &extra.4.encode()[..]).ok()?.into(),
+			tip: Compact::<BalanceOf<Self::Chain>>::decode(&mut &extra.6.encode()[..])
+				.ok()?
+				.into(),
+		})
 	}
 }
 
diff --git a/polkadot/bridges/relays/client-rococo/Cargo.toml b/polkadot/bridges/relays/client-rococo/Cargo.toml
index 5611ac27b1ce4b8409dff28a004e9219fe33eb58..28e97d3bf0cec3226402ac704eafe75dc3d7d4ad 100644
--- a/polkadot/bridges/relays/client-rococo/Cargo.toml
+++ b/polkadot/bridges/relays/client-rococo/Cargo.toml
@@ -6,12 +6,13 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
+scale-info = { version = "1.0", features = ["derive"] }
 
 # Bridge dependencies
+
 bridge-runtime-common = { path = "../../bin/runtime-common" }
 bp-header-chain = { path = "../../primitives/header-chain" }
 bp-message-dispatch = { path = "../../primitives/message-dispatch" }
@@ -24,9 +25,7 @@ pallet-bridge-dispatch = { path = "../../modules/dispatch" }
 pallet-bridge-messages = { path = "../../modules/messages" }
 
 # Substrate Dependencies
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-rococo/src/lib.rs b/polkadot/bridges/relays/client-rococo/src/lib.rs
index 5a7d8999f7f16f71e068ec6549c5606dd96ffafe..ad61e3cfd6437be5cf2c964d9f3f569beda51bdd 100644
--- a/polkadot/bridges/relays/client-rococo/src/lib.rs
+++ b/polkadot/bridges/relays/client-rococo/src/lib.rs
@@ -17,7 +17,10 @@
 //! Types used to connect to the Rococo-Substrate chain.
 
 use codec::Encode;
-use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme};
+use relay_substrate_client::{
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
+};
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
 use std::time::Duration;
@@ -39,17 +42,22 @@ impl ChainBase for Rococo {
 	type Hash = bp_rococo::Hash;
 	type Hasher = bp_rococo::Hashing;
 	type Header = bp_rococo::Header;
+
+	type AccountId = bp_rococo::AccountId;
+	type Balance = bp_rococo::Balance;
+	type Index = bp_rococo::Nonce;
+	type Signature = bp_rococo::Signature;
 }
 
 impl Chain for Rococo {
 	const NAME: &'static str = "Rococo";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_rococo::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = bp_rococo::AccountId;
-	type Index = bp_rococo::Index;
 	type SignedBlock = bp_rococo::SignedBlock;
 	type Call = crate::runtime::Call;
-	type Balance = bp_rococo::Balance;
+	type WeightToFee = bp_rococo::WeightToFee;
 }
 
 impl ChainWithBalances for Rococo {
@@ -66,17 +74,17 @@ impl TransactionSignScheme for Rococo {
 	fn sign_transaction(
 		genesis_hash: <Self::Chain as ChainBase>::Hash,
 		signer: &Self::AccountKeyPair,
-		signer_nonce: <Self::Chain as Chain>::Index,
-		call: <Self::Chain as Chain>::Call,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::new(
-			call,
+			unsigned.call,
 			bp_rococo::SignedExtensions::new(
 				bp_rococo::VERSION,
-				sp_runtime::generic::Era::Immortal,
+				era,
 				genesis_hash,
-				signer_nonce,
-				0,
+				unsigned.nonce,
+				unsigned.tip,
 			),
 		)
 		.expect("SignedExtension never fails.");
@@ -92,6 +100,24 @@ impl TransactionSignScheme for Rococo {
 			extra,
 		)
 	}
+
+	fn is_signed(tx: &Self::SignedTransaction) -> bool {
+		tx.signature.is_some()
+	}
+
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
+		tx.signature
+			.as_ref()
+			.map(|(address, _, _)| {
+				*address == bp_rococo::AccountId::from(*signer.public().as_array_ref()).into()
+			})
+			.unwrap_or(false)
+	}
+
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
+		let extra = &tx.signature.as_ref()?.2;
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
+	}
 }
 
 /// Rococo signing params.
diff --git a/polkadot/bridges/relays/client-rococo/src/runtime.rs b/polkadot/bridges/relays/client-rococo/src/runtime.rs
index 6dbd40bee56086fff1cd102fd251223c51ab3945..effe6e5c60a9d87fe7163c156934a0e966d83f5c 100644
--- a/polkadot/bridges/relays/client-rococo/src/runtime.rs
+++ b/polkadot/bridges/relays/client-rococo/src/runtime.rs
@@ -21,9 +21,7 @@ use bp_polkadot_core::PolkadotLike;
 use bp_runtime::Chain;
 use codec::{Decode, Encode};
 use frame_support::weights::Weight;
-
-/// Instance of messages pallet that is used to bridge with Wococo chain.
-pub type WithWococoMessagesInstance = pallet_bridge_messages::Instance1;
+use scale_info::TypeInfo;
 
 /// Unchecked Rococo extrinsic.
 pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
@@ -55,15 +53,15 @@ where
 /// Rococo Runtime `Call` enum.
 ///
 /// The enum represents a subset of possible `Call`s we can send to Rococo chain.
-/// Ideally this code would be auto-generated from Metadata, because we want to
+/// Ideally this code would be auto-generated from metadata, because we want to
 /// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
 ///
 /// All entries here (like pretty much in the entire file) must be kept in sync with Rococo
 /// `construct_runtime`, so that we maintain SCALE-compatibility.
 ///
-/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs
+/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs)
 #[allow(clippy::large_enum_variant)]
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 pub enum Call {
 	/// System pallet.
 	#[codec(index = 0)]
@@ -76,26 +74,26 @@ pub enum Call {
 	BridgeMessagesWococo(BridgeMessagesWococoCall),
 }
 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 #[allow(non_camel_case_types)]
 pub enum SystemCall {
 	#[codec(index = 1)]
 	remark(Vec<u8>),
 }
 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 #[allow(non_camel_case_types)]
 pub enum BridgeGrandpaWococoCall {
 	#[codec(index = 0)]
 	submit_finality_proof(
-		<PolkadotLike as Chain>::Header,
+		Box<<PolkadotLike as Chain>::Header>,
 		bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
 	),
 	#[codec(index = 1)]
 	initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
 }
 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 #[allow(non_camel_case_types)]
 pub enum BridgeMessagesWococoCall {
 	#[codec(index = 3)]
@@ -118,7 +116,9 @@ pub enum BridgeMessagesWococoCall {
 	),
 	#[codec(index = 6)]
 	receive_messages_delivery_proof(
-		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<bp_wococo::Hash>,
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_wococo::Hash,
+		>,
 		UnrewardedRelayersState,
 	),
 }
diff --git a/polkadot/bridges/relays/client-substrate/Cargo.toml b/polkadot/bridges/relays/client-substrate/Cargo.toml
index f5c2e2656059362d0758e5051e034e491057216d..6a1173581e7a7ef477f27e484645b86bac418405 100644
--- a/polkadot/bridges/relays/client-substrate/Cargo.toml
+++ b/polkadot/bridges/relays/client-substrate/Cargo.toml
@@ -8,17 +8,18 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 [dependencies]
 async-std = { version = "1.6.5", features = ["attributes"] }
 async-trait = "0.1.40"
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-jsonrpsee-proc-macros = "=0.2.0-alpha.6"
-jsonrpsee-ws-client = "=0.2.0-alpha.6"
+codec = { package = "parity-scale-codec", version = "2.2.0" }
+jsonrpsee-proc-macros = "0.3.1"
+jsonrpsee-ws-client = "0.3.1"
 log = "0.4.11"
 num-traits = "0.2"
 rand = "0.7"
+tokio = "1.8"
+thiserror = "1.0.26"
 
 # Bridge dependencies
 
 bp-header-chain = { path = "../../primitives/header-chain" }
-bp-messages = { path = "../../primitives/messages" }
 bp-runtime = { path = "../../primitives/runtime" }
 finality-relay = { path = "../finality" }
 headers-relay = { path = "../headers" }
@@ -29,12 +30,15 @@ relay-utils = { path = "../utils" }
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
 frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
 pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
+pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" }
 
diff --git a/polkadot/bridges/relays/client-substrate/src/chain.rs b/polkadot/bridges/relays/client-substrate/src/chain.rs
index 886b1bde1ce6740b9526dfeeca5f780d191beac4..75789ce37f308572ce96dac8d8886e4924db5672 100644
--- a/polkadot/bridges/relays/client-substrate/src/chain.rs
+++ b/polkadot/bridges/relays/client-substrate/src/chain.rs
@@ -14,16 +14,16 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use bp_runtime::Chain as ChainBase;
-use frame_support::Parameter;
-use jsonrpsee_ws_client::{DeserializeOwned, Serialize};
-use num_traits::{CheckedSub, SaturatingAdd, Zero};
+use bp_runtime::{Chain as ChainBase, HashOf, TransactionEraOf};
+use codec::{Codec, Encode};
+use frame_support::weights::WeightToFeePolynomial;
+use jsonrpsee_ws_client::types::{DeserializeOwned, Serialize};
+use num_traits::Zero;
+use sc_transaction_pool_api::TransactionStatus;
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{
 	generic::SignedBlock,
-	traits::{
-		AtLeast32Bit, Block as BlockT, Dispatchable, MaybeDisplay, MaybeSerialize, MaybeSerializeDeserialize, Member,
-	},
+	traits::{Block as BlockT, Dispatchable, Member},
 	EncodedJustification,
 };
 use std::{fmt::Debug, time::Duration};
@@ -37,30 +37,27 @@ pub trait Chain: ChainBase + Clone {
 	/// How often blocks are produced on that chain. It's suggested to set this value
 	/// to match the block time of the chain.
 	const AVERAGE_BLOCK_INTERVAL: Duration;
+	/// Maximal expected storage proof overhead (in bytes).
+	const STORAGE_PROOF_OVERHEAD: u32;
+	/// Maximal size (in bytes) of SCALE-encoded account id on this chain.
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32;
 
-	/// The user account identifier type for the runtime.
-	type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord + Default;
-	/// Index of a transaction used by the chain.
-	type Index: Parameter
-		+ Member
-		+ MaybeSerialize
-		+ Debug
-		+ Default
-		+ MaybeDisplay
-		+ DeserializeOwned
-		+ AtLeast32Bit
-		+ Copy;
 	/// Block type.
 	type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification<Self::Header>;
 	/// The aggregated `Call` type.
-	type Call: Dispatchable + Debug;
-	/// Balance of an account in native tokens.
-	///
-	/// The chain may support multiple tokens, but this particular type is for token that is used
-	/// to pay for transaction dispatch, to reward different relayers (headers, messages), etc.
-	type Balance: Parameter + Member + DeserializeOwned + Clone + Copy + CheckedSub + PartialOrd + SaturatingAdd + Zero;
+	type Call: Clone + Dispatchable + Debug;
+
+	/// Type that is used by the chain, to convert from weight to fee.
+	type WeightToFee: WeightToFeePolynomial<Balance = Self::Balance>;
 }
 
+/// Call type used by the chain.
+pub type CallOf<C> = <C as Chain>::Call;
+/// Weight-to-Fee type used by the chain.
+pub type WeightToFeeOf<C> = <C as Chain>::WeightToFee;
+/// Transaction status of the chain.
+pub type TransactionStatusOf<C> = TransactionStatus<HashOf<C>, HashOf<C>>;
+
 /// Substrate-based chain with `frame_system::Config::AccountData` set to
 /// the `pallet_balances::AccountData<Balance>`.
 pub trait ChainWithBalances: Chain {
@@ -68,14 +65,43 @@ pub trait ChainWithBalances: Chain {
 	fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey;
 }
 
+/// SCALE-encoded extrinsic.
+pub type EncodedExtrinsic = Vec<u8>;
+
 /// Block with justification.
 pub trait BlockWithJustification<Header> {
 	/// Return block header.
 	fn header(&self) -> Header;
+	/// Return encoded block extrinsics.
+	fn extrinsics(&self) -> Vec<EncodedExtrinsic>;
 	/// Return block justification, if known.
 	fn justification(&self) -> Option<&EncodedJustification>;
 }
 
+/// Transaction before it is signed.
+#[derive(Clone, Debug)]
+pub struct UnsignedTransaction<C: Chain> {
+	/// Runtime call of this transaction.
+	pub call: C::Call,
+	/// Transaction nonce.
+	pub nonce: C::Index,
+	/// Tip included into transaction.
+	pub tip: C::Balance,
+}
+
+impl<C: Chain> UnsignedTransaction<C> {
+	/// Create new unsigned transaction with given call, nonce and zero tip.
+	pub fn new(call: C::Call, nonce: C::Index) -> Self {
+		Self { call, nonce, tip: Zero::zero() }
+	}
+
+	/// Set transaction tip.
+	pub fn tip(mut self, tip: C::Balance) -> Self {
+		self.tip = tip;
+		self
+	}
+}
+
 /// Substrate-based chain transactions signing scheme.
 pub trait TransactionSignScheme {
 	/// Chain that this scheme is to be used.
@@ -83,15 +109,26 @@ pub trait TransactionSignScheme {
 	/// Type of key pairs used to sign transactions.
 	type AccountKeyPair: Pair;
 	/// Signed transaction.
-	type SignedTransaction;
+	type SignedTransaction: Clone + Debug + Codec + Send + 'static;
 
 	/// Create transaction for given runtime call, signed by given account.
 	fn sign_transaction(
 		genesis_hash: <Self::Chain as ChainBase>::Hash,
 		signer: &Self::AccountKeyPair,
-		signer_nonce: <Self::Chain as Chain>::Index,
-		call: <Self::Chain as Chain>::Call,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
 	) -> Self::SignedTransaction;
+
+	/// Returns true if transaction is signed.
+	fn is_signed(tx: &Self::SignedTransaction) -> bool;
+
+	/// Returns true if transaction is signed by given signer.
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool;
+
+	/// Parse signed transaction into its unsigned part.
+	///
+	/// Returns `None` if signed transaction has unsupported format.
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>>;
 }
 
 impl<Block: BlockT> BlockWithJustification<Block::Header> for SignedBlock<Block> {
@@ -99,6 +136,10 @@ impl<Block: BlockT> BlockWithJustification<Block::Header> for SignedBlock<Block>
 		self.block.header().clone()
 	}
 
+	fn extrinsics(&self) -> Vec<EncodedExtrinsic> {
+		self.block.extrinsics().iter().map(Encode::encode).collect()
+	}
+
 	fn justification(&self) -> Option<&EncodedJustification> {
 		self.justifications
 			.as_ref()
diff --git a/polkadot/bridges/relays/client-substrate/src/client.rs b/polkadot/bridges/relays/client-substrate/src/client.rs
index e4f503b91752ba2cfa5908a5efdb339ef9917315..1902875c938107c2e2e54eb012f1ad5fe2bf7f97 100644
--- a/polkadot/bridges/relays/client-substrate/src/client.rs
+++ b/polkadot/bridges/relays/client-substrate/src/client.rs
@@ -16,27 +16,46 @@
 
 //! Substrate node client.
 
-use crate::chain::{Chain, ChainWithBalances};
-use crate::rpc::Substrate;
-use crate::{ConnectionParams, Error, Result};
+use crate::{
+	chain::{Chain, ChainWithBalances, TransactionStatusOf},
+	rpc::Substrate,
+	ConnectionParams, Error, HashOf, HeaderIdOf, Result,
+};
 
 use async_std::sync::{Arc, Mutex};
-use codec::Decode;
+use async_trait::async_trait;
+use codec::{Decode, Encode};
 use frame_system::AccountInfo;
-use jsonrpsee_ws_client::{traits::SubscriptionClient, v2::params::JsonRpcParams, DeserializeOwned};
-use jsonrpsee_ws_client::{Subscription, WsClient as RpcClient, WsClientBuilder as RpcClientBuilder};
-use num_traits::Zero;
+use futures::{SinkExt, StreamExt};
+use jsonrpsee_ws_client::{
+	types::{
+		self as jsonrpsee_types, traits::SubscriptionClient, v2::params::JsonRpcParams,
+		DeserializeOwned,
+	},
+	WsClient as RpcClient, WsClientBuilder as RpcClientBuilder,
+};
+use num_traits::{Bounded, Zero};
 use pallet_balances::AccountData;
-use relay_utils::relay_loop::RECONNECT_DELAY;
-use sp_core::{storage::StorageKey, Bytes};
+use pallet_transaction_payment::InclusionFee;
+use relay_utils::{relay_loop::RECONNECT_DELAY, HeaderId};
+use sp_core::{
+	storage::{StorageData, StorageKey},
+	Bytes, Hasher,
+};
+use sp_runtime::{
+	traits::Header as HeaderT,
+	transaction_validity::{TransactionSource, TransactionValidity},
+};
 use sp_trie::StorageProof;
 use sp_version::RuntimeVersion;
+use std::{convert::TryFrom, future::Future};
 
 const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities";
+const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction";
 const MAX_SUBSCRIPTION_CAPACITY: usize = 4096;
 
 /// Opaque justifications subscription type.
-pub type JustificationsSubscription = Subscription<Bytes>;
+pub struct Subscription<T>(Mutex<futures::channel::mpsc::Receiver<Option<T>>>);
 
 /// Opaque GRANDPA authorities set.
 pub type OpaqueGrandpaAuthoritiesSet = Vec<u8>;
@@ -45,21 +64,37 @@ pub type OpaqueGrandpaAuthoritiesSet = Vec<u8>;
 ///
 /// Cloning `Client` is a cheap operation.
 pub struct Client<C: Chain> {
+	/// Tokio runtime handle.
+	tokio: Arc<tokio::runtime::Runtime>,
 	/// Client connection params.
 	params: ConnectionParams,
 	/// Substrate RPC client.
 	client: Arc<RpcClient>,
 	/// Genesis block hash.
-	genesis_hash: C::Hash,
-	/// If several tasks are submitting their transactions simultaneously using `submit_signed_extrinsic`
-	/// method, they may get the same transaction nonce. So one of transactions will be rejected
-	/// from the pool. This lock is here to prevent situations like that.
+	genesis_hash: HashOf<C>,
+	/// If several tasks are submitting their transactions simultaneously using
+	/// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of
+	/// transactions will be rejected from the pool. This lock is here to prevent situations like
+	/// that.
 	submit_signed_extrinsic_lock: Arc<Mutex<()>>,
 }
 
+#[async_trait]
+impl<C: Chain> relay_utils::relay_loop::Client for Client<C> {
+	type Error = Error;
+
+	async fn reconnect(&mut self) -> Result<()> {
+		let (tokio, client) = Self::build_client(self.params.clone()).await?;
+		self.tokio = tokio;
+		self.client = client;
+		Ok(())
+	}
+}
+
 impl<C: Chain> Clone for Client<C> {
 	fn clone(&self) -> Self {
 		Client {
+			tokio: self.tokio.clone(),
 			params: self.params.clone(),
 			client: self.client.clone(),
 			genesis_hash: self.genesis_hash,
@@ -70,9 +105,7 @@ impl<C: Chain> Clone for Client<C> {
 
 impl<C: Chain> std::fmt::Debug for Client<C> {
 	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
-		fmt.debug_struct("Client")
-			.field("genesis_hash", &self.genesis_hash)
-			.finish()
+		fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish()
 	}
 }
 
@@ -101,12 +134,18 @@ impl<C: Chain> Client<C> {
 	/// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection
 	/// has been established or error otherwise.
 	pub async fn try_connect(params: ConnectionParams) -> Result<Self> {
-		let client = Self::build_client(params.clone()).await?;
+		let (tokio, client) = Self::build_client(params.clone()).await?;
 
 		let number: C::BlockNumber = Zero::zero();
-		let genesis_hash = Substrate::<C>::chain_get_block_hash(&*client, number).await?;
+		let genesis_hash_client = client.clone();
+		let genesis_hash = tokio
+			.spawn(async move {
+				Substrate::<C>::chain_get_block_hash(&*genesis_hash_client, number).await
+			})
+			.await??;
 
 		Ok(Self {
+			tokio,
 			params,
 			client,
 			genesis_hash,
@@ -114,39 +153,43 @@ impl<C: Chain> Client<C> {
 		})
 	}
 
-	/// Reopen client connection.
-	pub async fn reconnect(&mut self) -> Result<()> {
-		self.client = Self::build_client(self.params.clone()).await?;
-		Ok(())
-	}
-
 	/// Build client to use in connection.
-	async fn build_client(params: ConnectionParams) -> Result<Arc<RpcClient>> {
+	async fn build_client(
+		params: ConnectionParams,
+	) -> Result<(Arc<tokio::runtime::Runtime>, Arc<RpcClient>)> {
+		let tokio = tokio::runtime::Runtime::new()?;
 		let uri = format!(
 			"{}://{}:{}",
 			if params.secure { "wss" } else { "ws" },
 			params.host,
 			params.port,
 		);
-		let client = RpcClientBuilder::default()
-			.max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY)
-			.build(&uri)
-			.await?;
-
-		Ok(Arc::new(client))
+		let client = tokio
+			.spawn(async move {
+				RpcClientBuilder::default()
+					.max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY)
+					.build(&uri)
+					.await
+			})
+			.await??;
+
+		Ok((Arc::new(tokio), Arc::new(client)))
 	}
 }
 
 impl<C: Chain> Client<C> {
 	/// Returns true if client is connected to at least one peer and is in synced state.
 	pub async fn ensure_synced(&self) -> Result<()> {
-		let health = Substrate::<C>::system_health(&*self.client).await?;
-		let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0);
-		if is_synced {
-			Ok(())
-		} else {
-			Err(Error::ClientNotSynced(health))
-		}
+		self.jsonrpsee_execute(|client| async move {
+			let health = Substrate::<C>::system_health(&*client).await?;
+			let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0);
+			if is_synced {
+				Ok(())
+			} else {
+				Err(Error::ClientNotSynced(health))
+			}
+		})
+		.await
 	}
 
 	/// Return hash of the genesis block.
@@ -156,7 +199,15 @@ impl<C: Chain> Client<C> {
 
 	/// Return hash of the best finalized block.
 	pub async fn best_finalized_header_hash(&self) -> Result<C::Hash> {
-		Ok(Substrate::<C>::chain_get_finalized_head(&*self.client).await?)
+		self.jsonrpsee_execute(|client| async move {
+			Ok(Substrate::<C>::chain_get_finalized_head(&*client).await?)
+		})
+		.await
+	}
+
+	/// Return number of the best finalized block.
+	pub async fn best_finalized_header_number(&self) -> Result<C::BlockNumber> {
+		Ok(*self.header_by_hash(self.best_finalized_header_hash().await?).await?.number())
 	}
 
 	/// Returns the best Substrate header.
@@ -164,12 +215,18 @@ impl<C: Chain> Client<C> {
 	where
 		C::Header: DeserializeOwned,
 	{
-		Ok(Substrate::<C>::chain_get_header(&*self.client, None).await?)
+		self.jsonrpsee_execute(|client| async move {
+			Ok(Substrate::<C>::chain_get_header(&*client, None).await?)
+		})
+		.await
 	}
 
 	/// Get a Substrate block from its hash.
 	pub async fn get_block(&self, block_hash: Option<C::Hash>) -> Result<C::SignedBlock> {
-		Ok(Substrate::<C>::chain_get_block(&*self.client, block_hash).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::chain_get_block(&*client, block_hash).await?)
+		})
+		.await
 	}
 
 	/// Get a Substrate header by its hash.
@@ -177,12 +234,18 @@ impl<C: Chain> Client<C> {
 	where
 		C::Header: DeserializeOwned,
 	{
-		Ok(Substrate::<C>::chain_get_header(&*self.client, block_hash).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::chain_get_header(&*client, block_hash).await?)
+		})
+		.await
 	}
 
 	/// Get a Substrate block hash by its number.
 	pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result<C::Hash> {
-		Ok(Substrate::<C>::chain_get_block_hash(&*self.client, number).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::chain_get_block_hash(&*client, number).await?)
+		})
+		.await
 	}
 
 	/// Get a Substrate header by its number.
@@ -191,51 +254,84 @@ impl<C: Chain> Client<C> {
 		C::Header: DeserializeOwned,
 	{
 		let block_hash = Self::block_hash_by_number(self, block_number).await?;
-		Ok(Self::header_by_hash(self, block_hash).await?)
+		let header_by_hash = Self::header_by_hash(self, block_hash).await?;
+		Ok(header_by_hash)
 	}
 
 	/// Return runtime version.
 	pub async fn runtime_version(&self) -> Result<RuntimeVersion> {
-		Ok(Substrate::<C>::state_runtime_version(&*self.client).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::state_runtime_version(&*client).await?)
+		})
+		.await
 	}
 
 	/// Read value from runtime storage.
-	pub async fn storage_value<T: Decode>(&self, storage_key: StorageKey) -> Result<Option<T>> {
-		Substrate::<C>::state_get_storage(&*self.client, storage_key)
+	pub async fn storage_value<T: Send + Decode + 'static>(
+		&self,
+		storage_key: StorageKey,
+		block_hash: Option<C::Hash>,
+	) -> Result<Option<T>> {
+		self.raw_storage_value(storage_key, block_hash)
 			.await?
-			.map(|encoded_value| T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed))
+			.map(|encoded_value| {
+				T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed)
+			})
 			.transpose()
 	}
 
+	/// Read raw value from runtime storage.
+	pub async fn raw_storage_value(
+		&self,
+		storage_key: StorageKey,
+		block_hash: Option<C::Hash>,
+	) -> Result<Option<StorageData>> {
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::state_get_storage(&*client, storage_key, block_hash).await?)
+		})
+		.await
+	}
+
 	/// Return native tokens balance of the account.
 	pub async fn free_native_balance(&self, account: C::AccountId) -> Result<C::Balance>
 	where
 		C: ChainWithBalances,
 	{
-		let storage_key = C::account_info_storage_key(&account);
-		let encoded_account_data = Substrate::<C>::state_get_storage(&*self.client, storage_key)
-			.await?
-			.ok_or(Error::AccountDoesNotExist)?;
-		let decoded_account_data =
-			AccountInfo::<C::Index, AccountData<C::Balance>>::decode(&mut &encoded_account_data.0[..])
-				.map_err(Error::ResponseParseFailed)?;
-		Ok(decoded_account_data.data.free)
+		self.jsonrpsee_execute(move |client| async move {
+			let storage_key = C::account_info_storage_key(&account);
+			let encoded_account_data =
+				Substrate::<C>::state_get_storage(&*client, storage_key, None)
+					.await?
+					.ok_or(Error::AccountDoesNotExist)?;
+			let decoded_account_data = AccountInfo::<C::Index, AccountData<C::Balance>>::decode(
+				&mut &encoded_account_data.0[..],
+			)
+			.map_err(Error::ResponseParseFailed)?;
+			Ok(decoded_account_data.data.free)
+		})
+		.await
 	}
 
 	/// Get the nonce of the given Substrate account.
 	///
 	/// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address.
 	pub async fn next_account_index(&self, account: C::AccountId) -> Result<C::Index> {
-		Ok(Substrate::<C>::system_account_next_index(&*self.client, account).await?)
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::system_account_next_index(&*client, account).await?)
+		})
+		.await
 	}
 
 	/// Submit unsigned extrinsic for inclusion in a block.
 	///
 	/// Note: The given transaction needs to be SCALE encoded beforehand.
 	pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result<C::Hash> {
-		let tx_hash = Substrate::<C>::author_submit_extrinsic(&*self.client, transaction).await?;
-		log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash);
-		Ok(tx_hash)
+		self.jsonrpsee_execute(move |client| async move {
+			let tx_hash = Substrate::<C>::author_submit_extrinsic(&*client, transaction).await?;
+			log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash);
+			Ok(tx_hash)
+		})
+		.await
 	}
 
 	/// Submit an extrinsic signed by given account.
@@ -248,51 +344,242 @@ impl<C: Chain> Client<C> {
 	pub async fn submit_signed_extrinsic(
 		&self,
 		extrinsic_signer: C::AccountId,
-		prepare_extrinsic: impl FnOnce(C::Index) -> Bytes,
+		prepare_extrinsic: impl FnOnce(HeaderIdOf<C>, C::Index) -> Bytes + Send + 'static,
 	) -> Result<C::Hash> {
 		let _guard = self.submit_signed_extrinsic_lock.lock().await;
 		let transaction_nonce = self.next_account_index(extrinsic_signer).await?;
-		let extrinsic = prepare_extrinsic(transaction_nonce);
-		let tx_hash = Substrate::<C>::author_submit_extrinsic(&*self.client, extrinsic).await?;
-		log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash);
-		Ok(tx_hash)
+		let best_header = self.best_header().await?;
+		let best_header_id = HeaderId(*best_header.number(), best_header.hash());
+		self.jsonrpsee_execute(move |client| async move {
+			let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce);
+			let tx_hash = Substrate::<C>::author_submit_extrinsic(&*client, extrinsic).await?;
+			log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash);
+			Ok(tx_hash)
+		})
+		.await
+	}
+
+	/// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status
+	/// after submission.
+	pub async fn submit_and_watch_signed_extrinsic(
+		&self,
+		extrinsic_signer: C::AccountId,
+		prepare_extrinsic: impl FnOnce(HeaderIdOf<C>, C::Index) -> Bytes + Send + 'static,
+	) -> Result<Subscription<TransactionStatusOf<C>>> {
+		let _guard = self.submit_signed_extrinsic_lock.lock().await;
+		let transaction_nonce = self.next_account_index(extrinsic_signer).await?;
+		let best_header = self.best_header().await?;
+		let best_header_id = HeaderId(*best_header.number(), best_header.hash());
+		let subscription = self
+			.jsonrpsee_execute(move |client| async move {
+				let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce);
+				let tx_hash = C::Hasher::hash(&extrinsic.0);
+				let subscription = client
+					.subscribe(
+						"author_submitAndWatchExtrinsic",
+						JsonRpcParams::Array(vec![jsonrpsee_types::to_json_value(extrinsic)
+							.map_err(|e| Error::RpcError(e.into()))?]),
+						"author_unwatchExtrinsic",
+					)
+					.await?;
+				log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash);
+				Ok(subscription)
+			})
+			.await?;
+		let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY);
+		self.tokio.spawn(Subscription::background_worker(
+			C::NAME.into(),
+			"extrinsic".into(),
+			subscription,
+			sender,
+		));
+		Ok(Subscription(Mutex::new(receiver)))
+	}
+
+	/// Returns pending extrinsics from transaction pool.
+	pub async fn pending_extrinsics(&self) -> Result<Vec<Bytes>> {
+		self.jsonrpsee_execute(move |client| async move {
+			Ok(Substrate::<C>::author_pending_extrinsics(&*client).await?)
+		})
+		.await
+	}
+
+	/// Validate transaction at given block state.
+	pub async fn validate_transaction<SignedTransaction: Encode + Send + 'static>(
+		&self,
+		at_block: C::Hash,
+		transaction: SignedTransaction,
+	) -> Result<TransactionValidity> {
+		self.jsonrpsee_execute(move |client| async move {
+			let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string();
+			let data = Bytes((TransactionSource::External, transaction, at_block).encode());
+
+			let encoded_response =
+				Substrate::<C>::state_call(&*client, call, data, Some(at_block)).await?;
+			let validity = TransactionValidity::decode(&mut &encoded_response.0[..])
+				.map_err(Error::ResponseParseFailed)?;
+
+			Ok(validity)
+		})
+		.await
+	}
+
+	/// Estimate fee that will be spent on given extrinsic.
+	pub async fn estimate_extrinsic_fee(
+		&self,
+		transaction: Bytes,
+	) -> Result<InclusionFee<C::Balance>> {
+		self.jsonrpsee_execute(move |client| async move {
+			let fee_details =
+				Substrate::<C>::payment_query_fee_details(&*client, transaction, None).await?;
+			let inclusion_fee = fee_details
+				.inclusion_fee
+				.map(|inclusion_fee| InclusionFee {
+					base_fee: C::Balance::try_from(inclusion_fee.base_fee.into_u256())
+						.unwrap_or_else(|_| C::Balance::max_value()),
+					len_fee: C::Balance::try_from(inclusion_fee.len_fee.into_u256())
+						.unwrap_or_else(|_| C::Balance::max_value()),
+					adjusted_weight_fee: C::Balance::try_from(
+						inclusion_fee.adjusted_weight_fee.into_u256(),
+					)
+					.unwrap_or_else(|_| C::Balance::max_value()),
+				})
+				.unwrap_or_else(|| InclusionFee {
+					base_fee: Zero::zero(),
+					len_fee: Zero::zero(),
+					adjusted_weight_fee: Zero::zero(),
+				});
+			Ok(inclusion_fee)
+		})
+		.await
 	}
 
 	/// Get the GRANDPA authority set at given block.
-	pub async fn grandpa_authorities_set(&self, block: C::Hash) -> Result<OpaqueGrandpaAuthoritiesSet> {
-		let call = SUB_API_GRANDPA_AUTHORITIES.to_string();
-		let data = Bytes(Vec::new());
+	pub async fn grandpa_authorities_set(
+		&self,
+		block: C::Hash,
+	) -> Result<OpaqueGrandpaAuthoritiesSet> {
+		self.jsonrpsee_execute(move |client| async move {
+			let call = SUB_API_GRANDPA_AUTHORITIES.to_string();
+			let data = Bytes(Vec::new());
 
-		let encoded_response = Substrate::<C>::state_call(&*self.client, call, data, Some(block)).await?;
-		let authority_list = encoded_response.0;
+			let encoded_response =
+				Substrate::<C>::state_call(&*client, call, data, Some(block)).await?;
+			let authority_list = encoded_response.0;
 
-		Ok(authority_list)
+			Ok(authority_list)
+		})
+		.await
 	}
 
 	/// Execute runtime call at given block.
-	pub async fn state_call(&self, method: String, data: Bytes, at_block: Option<C::Hash>) -> Result<Bytes> {
-		Substrate::<C>::state_call(&*self.client, method, data, at_block)
-			.await
-			.map_err(Into::into)
+	pub async fn state_call(
+		&self,
+		method: String,
+		data: Bytes,
+		at_block: Option<C::Hash>,
+	) -> Result<Bytes> {
+		self.jsonrpsee_execute(move |client| async move {
+			Substrate::<C>::state_call(&*client, method, data, at_block)
+				.await
+				.map_err(Into::into)
+		})
+		.await
 	}
 
 	/// Returns storage proof of given storage keys.
-	pub async fn prove_storage(&self, keys: Vec<StorageKey>, at_block: C::Hash) -> Result<StorageProof> {
-		Substrate::<C>::state_prove_storage(&*self.client, keys, Some(at_block))
-			.await
-			.map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect()))
-			.map_err(Into::into)
+	pub async fn prove_storage(
+		&self,
+		keys: Vec<StorageKey>,
+		at_block: C::Hash,
+	) -> Result<StorageProof> {
+		self.jsonrpsee_execute(move |client| async move {
+			Substrate::<C>::state_prove_storage(&*client, keys, Some(at_block))
+				.await
+				.map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect()))
+				.map_err(Into::into)
+		})
+		.await
 	}
 
 	/// Return new justifications stream.
-	pub async fn subscribe_justifications(&self) -> Result<JustificationsSubscription> {
-		Ok(self
-			.client
-			.subscribe(
-				"grandpa_subscribeJustifications",
-				JsonRpcParams::NoParams,
-				"grandpa_unsubscribeJustifications",
-			)
-			.await?)
+	pub async fn subscribe_justifications(&self) -> Result<Subscription<Bytes>> {
+		let subscription = self
+			.jsonrpsee_execute(move |client| async move {
+				Ok(client
+					.subscribe(
+						"grandpa_subscribeJustifications",
+						JsonRpcParams::NoParams,
+						"grandpa_unsubscribeJustifications",
+					)
+					.await?)
+			})
+			.await?;
+		let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY);
+		self.tokio.spawn(Subscription::background_worker(
+			C::NAME.into(),
+			"justification".into(),
+			subscription,
+			sender,
+		));
+		Ok(Subscription(Mutex::new(receiver)))
+	}
+
+	/// Execute jsonrpsee future in tokio context.
+	async fn jsonrpsee_execute<MF, F, T>(&self, make_jsonrpsee_future: MF) -> Result<T>
+	where
+		MF: FnOnce(Arc<RpcClient>) -> F + Send + 'static,
+		F: Future<Output = Result<T>> + Send,
+		T: Send + 'static,
+	{
+		let client = self.client.clone();
+		self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await?
+	}
+}
+
+impl<T: DeserializeOwned> Subscription<T> {
+	/// Return next item from the subscription.
+	pub async fn next(&self) -> Result<Option<T>> {
+		let mut receiver = self.0.lock().await;
+		let item = receiver.next().await;
+		Ok(item.unwrap_or(None))
+	}
+
+	/// Background worker that is executed in tokio context as `jsonrpsee` requires.
+	async fn background_worker(
+		chain_name: String,
+		item_type: String,
+		mut subscription: jsonrpsee_types::Subscription<T>,
+		mut sender: futures::channel::mpsc::Sender<Option<T>>,
+	) {
+		loop {
+			match subscription.next().await {
+				Ok(Some(item)) =>
+					if sender.send(Some(item)).await.is_err() {
+						break
+					},
+				Ok(None) => {
+					log::trace!(
+						target: "bridge",
+						"{} {} subscription stream has returned None. Stream needs to be restarted.",
+						chain_name,
+						item_type,
+					);
+					let _ = sender.send(None).await;
+					break
+				},
+				Err(e) => {
+					log::trace!(
+						target: "bridge",
+						"{} {} subscription stream has returned '{:?}'. Stream needs to be restarted.",
+						chain_name,
+						item_type,
+						e,
+					);
+					let _ = sender.send(None).await;
+					break
+				},
+			}
+		}
 	}
 }
diff --git a/polkadot/bridges/relays/client-substrate/src/error.rs b/polkadot/bridges/relays/client-substrate/src/error.rs
index 304229ede1986b733328a3c35d11ee59c8f38f05..33b9b22a03efe4d74f9375114cb4c5aa22663eb9 100644
--- a/polkadot/bridges/relays/client-substrate/src/error.rs
+++ b/polkadot/bridges/relays/client-substrate/src/error.rs
@@ -16,54 +16,55 @@
 
 //! Substrate node RPC errors.
 
-use jsonrpsee_ws_client::Error as RpcError;
+use jsonrpsee_ws_client::types::Error as RpcError;
 use relay_utils::MaybeConnectionError;
 use sc_rpc_api::system::Health;
+use sp_runtime::transaction_validity::TransactionValidityError;
+use thiserror::Error;
 
 /// Result type used by Substrate client.
 pub type Result<T> = std::result::Result<T, Error>;
 
 /// Errors that can occur only when interacting with
 /// a Substrate node through RPC.
-#[derive(Debug)]
+#[derive(Error, Debug)]
 pub enum Error {
+	/// IO error.
+	#[error("IO error: {0}")]
+	Io(#[from] std::io::Error),
 	/// An error that can occur when making a request to
 	/// an JSON-RPC server.
-	RpcError(RpcError),
+	#[error("RPC error: {0}")]
+	RpcError(#[from] RpcError),
 	/// The response from the server could not be SCALE decoded.
-	ResponseParseFailed(codec::Error),
+	#[error("Response parse failed: {0}")]
+	ResponseParseFailed(#[from] codec::Error),
 	/// The Substrate bridge pallet has not yet been initialized.
+	#[error("The Substrate bridge pallet has not been initialized yet.")]
 	UninitializedBridgePallet,
 	/// Account does not exist on the chain.
+	#[error("Account does not exist on the chain.")]
 	AccountDoesNotExist,
 	/// Runtime storage is missing mandatory ":code:" entry.
+	#[error("Mandatory :code: entry is missing from runtime storage.")]
 	MissingMandatoryCodeEntry,
 	/// The client we're connected to is not synced, so we can't rely on its state.
+	#[error("Substrate client is not synced {0}.")]
 	ClientNotSynced(Health),
 	/// An error has happened when we have tried to parse storage proof.
+	#[error("Error when parsing storage proof: {0:?}.")]
 	StorageProofError(bp_runtime::StorageProofError),
+	/// The Substrate transaction is invalid.
+	#[error("Substrate transaction is invalid: {0:?}")]
+	TransactionInvalid(#[from] TransactionValidityError),
 	/// Custom logic error.
+	#[error("{0}")]
 	Custom(String),
 }
 
-impl std::error::Error for Error {
-	fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-		match self {
-			Self::RpcError(ref e) => Some(e),
-			Self::ResponseParseFailed(ref e) => Some(e),
-			Self::UninitializedBridgePallet => None,
-			Self::AccountDoesNotExist => None,
-			Self::MissingMandatoryCodeEntry => None,
-			Self::ClientNotSynced(_) => None,
-			Self::StorageProofError(_) => None,
-			Self::Custom(_) => None,
-		}
-	}
-}
-
-impl From<RpcError> for Error {
-	fn from(error: RpcError) -> Self {
-		Error::RpcError(error)
+impl From<tokio::task::JoinError> for Error {
+	fn from(error: tokio::task::JoinError) -> Self {
+		Error::Custom(format!("Failed to wait tokio task: {}", error))
 	}
 }
 
@@ -71,7 +72,7 @@ impl MaybeConnectionError for Error {
 	fn is_connection_error(&self) -> bool {
 		matches!(
 			*self,
-			Error::RpcError(RpcError::TransportError(_))
+			Error::RpcError(RpcError::Transport(_))
 				// right now if connection to the ws server is dropped (after it is already established),
 				// we're getting this error
 				| Error::RpcError(RpcError::Internal(_))
@@ -80,26 +81,3 @@ impl MaybeConnectionError for Error {
 		)
 	}
 }
-
-impl std::fmt::Display for Error {
-	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-		let s = match self {
-			Self::RpcError(e) => e.to_string(),
-			Self::ResponseParseFailed(e) => e.to_string(),
-			Self::UninitializedBridgePallet => "The Substrate bridge pallet has not been initialized yet.".into(),
-			Self::AccountDoesNotExist => "Account does not exist on the chain".into(),
-			Self::MissingMandatoryCodeEntry => "Mandatory :code: entry is missing from runtime storage".into(),
-			Self::StorageProofError(e) => format!("Error when parsing storage proof: {:?}", e),
-			Self::ClientNotSynced(health) => format!("Substrate client is not synced: {}", health),
-			Self::Custom(e) => e.clone(),
-		};
-
-		write!(f, "{}", s)
-	}
-}
-
-impl From<Error> for String {
-	fn from(error: Error) -> String {
-		error.to_string()
-	}
-}
diff --git a/polkadot/bridges/relays/client-substrate/src/finality_source.rs b/polkadot/bridges/relays/client-substrate/src/finality_source.rs
index 72a11ae99003b378286bc8b25694cf7a06c98ad8..98526de178cb3de5654d61da1b0a1f0737a495a6 100644
--- a/polkadot/bridges/relays/client-substrate/src/finality_source.rs
+++ b/polkadot/bridges/relays/client-substrate/src/finality_source.rs
@@ -16,10 +16,12 @@
 
 //! Default generic implementation of finality source for basic Substrate client.
 
-use crate::chain::{BlockWithJustification, Chain};
-use crate::client::Client;
-use crate::error::Error;
-use crate::sync_header::SyncHeader;
+use crate::{
+	chain::{BlockWithJustification, Chain},
+	client::Client,
+	error::Error,
+	sync_header::SyncHeader,
+};
 
 use async_std::sync::{Arc, Mutex};
 use async_trait::async_trait;
@@ -43,12 +45,11 @@ pub struct FinalitySource<C: Chain, P> {
 
 impl<C: Chain, P> FinalitySource<C, P> {
 	/// Create new headers source using given client.
-	pub fn new(client: Client<C>, maximal_header_number: Option<RequiredHeaderNumberRef<C>>) -> Self {
-		FinalitySource {
-			client,
-			maximal_header_number,
-			_phantom: Default::default(),
-		}
+	pub fn new(
+		client: Client<C>,
+		maximal_header_number: Option<RequiredHeaderNumberRef<C>>,
+	) -> Self {
+		FinalitySource { client, maximal_header_number, _phantom: Default::default() }
 	}
 
 	/// Returns reference to the underlying RPC client.
@@ -122,7 +123,9 @@ where
 
 		let justification = signed_block
 			.justification()
-			.map(|raw_justification| GrandpaJustification::<C::Header>::decode(&mut raw_justification.as_slice()))
+			.map(|raw_justification| {
+				GrandpaJustification::<C::Header>::decode(&mut raw_justification.as_slice())
+			})
 			.transpose()
 			.map_err(Error::ResponseParseFailed)?;
 
@@ -132,27 +135,35 @@ where
 	async fn finality_proofs(&self) -> Result<Self::FinalityProofsStream, Error> {
 		Ok(unfold(
 			self.client.clone().subscribe_justifications().await?,
-			move |mut subscription| async move {
+			move |subscription| async move {
 				loop {
-					let next_justification = subscription.next().await?;
+					let log_error = |err| {
+						log::error!(
+							target: "bridge",
+							"Failed to read justification target from the {} justifications stream: {:?}",
+							P::SOURCE_NAME,
+							err,
+						);
+					};
+
+					let next_justification = subscription
+						.next()
+						.await
+						.map_err(|err| log_error(err.to_string()))
+						.ok()??;
+
 					let decoded_justification =
-						GrandpaJustification::<C::Header>::decode(&mut &next_justification.0[..]);
+						GrandpaJustification::<C::Header>::decode(&mut &next_justification[..]);
 
 					let justification = match decoded_justification {
 						Ok(j) => j,
 						Err(err) => {
-							log::error!(
-								target: "bridge",
-								"Failed to decode justification target from the {} justifications stream: {:?}",
-								P::SOURCE_NAME,
-								err,
-							);
-
-							continue;
-						}
+							log_error(format!("decode failed with error {:?}", err));
+							continue
+						},
 					};
 
-					return Some((justification, subscription));
+					return Some((justification, subscription))
 				}
 			},
 		)
diff --git a/polkadot/bridges/relays/client-substrate/src/guard.rs b/polkadot/bridges/relays/client-substrate/src/guard.rs
index c6e191ce078ff617205021eb1142ac9265bff2e0..a064e36234007785e58776f0b1da836d9f81370e 100644
--- a/polkadot/bridges/relays/client-substrate/src/guard.rs
+++ b/polkadot/bridges/relays/client-substrate/src/guard.rs
@@ -17,32 +17,41 @@
 //! Pallet provides a set of guard functions that are running in background threads
 //! and are aborting process if some condition fails.
 
-use crate::{Chain, ChainWithBalances, Client};
+use crate::{error::Error, Chain, ChainWithBalances, Client};
 
 use async_trait::async_trait;
 use num_traits::CheckedSub;
 use sp_version::RuntimeVersion;
 use std::{
 	collections::VecDeque,
+	fmt::Display,
 	time::{Duration, Instant},
 };
 
 /// Guards environment.
 #[async_trait]
 pub trait Environment<C: ChainWithBalances>: Send + Sync + 'static {
+	/// Error type.
+	type Error: Display + Send + Sync + 'static;
+
 	/// Return current runtime version.
-	async fn runtime_version(&mut self) -> Result<RuntimeVersion, String>;
+	async fn runtime_version(&mut self) -> Result<RuntimeVersion, Self::Error>;
 	/// Return free native balance of the account on the chain.
-	async fn free_native_balance(&mut self, account: C::AccountId) -> Result<C::Balance, String>;
+	async fn free_native_balance(
+		&mut self,
+		account: C::AccountId,
+	) -> Result<C::Balance, Self::Error>;
 
 	/// Return current time.
 	fn now(&self) -> Instant {
 		Instant::now()
 	}
+
 	/// Sleep given amount of time.
 	async fn sleep(&mut self, duration: Duration) {
 		async_std::task::sleep(duration).await
 	}
+
 	/// Abort current process. Called when guard condition check fails.
 	async fn abort(&mut self) {
 		std::process::abort();
@@ -50,7 +59,10 @@ pub trait Environment<C: ChainWithBalances>: Send + Sync + 'static {
 }
 
 /// Abort when runtime spec version is different from specified.
-pub fn abort_on_spec_version_change<C: ChainWithBalances>(mut env: impl Environment<C>, expected_spec_version: u32) {
+pub fn abort_on_spec_version_change<C: ChainWithBalances>(
+	mut env: impl Environment<C>,
+	expected_spec_version: u32,
+) {
 	async_std::task::spawn(async move {
 		loop {
 			let actual_spec_version = env.runtime_version().await;
@@ -66,10 +78,10 @@ pub fn abort_on_spec_version_change<C: ChainWithBalances>(mut env: impl Environm
 					);
 
 					env.abort().await;
-				}
+				},
 				Err(error) => log::warn!(
 					target: "bridge-guard",
-					"Failed to read {} runtime version: {:?}. Relay may need to be stopped manually",
+					"Failed to read {} runtime version: {}. Relay may need to be stopped manually",
 					C::NAME,
 					error,
 				),
@@ -80,8 +92,9 @@ pub fn abort_on_spec_version_change<C: ChainWithBalances>(mut env: impl Environm
 	});
 }
 
-/// Abort if, during a 24 hours, free balance of given account is decreased at least by given value.
-/// Other components may increase (or decrease) balance of account and it WILL affect logic of the guard.
+/// Abort if, during 24 hours, free balance of given account is decreased at least by given value.
+/// Other components may increase (or decrease) balance of account and it WILL affect logic of the
+/// guard.
 pub fn abort_when_account_balance_decreased<C: ChainWithBalances>(
 	mut env: impl Environment<C>,
 	account_id: C::AccountId,
@@ -127,16 +140,16 @@ pub fn abort_when_account_balance_decreased<C: ChainWithBalances>(
 
 						env.abort().await;
 					}
-				}
+				},
 				Err(error) => {
 					log::warn!(
 						target: "bridge-guard",
-						"Failed to read {} account {:?} balance: {:?}. Relay may need to be stopped manually",
+						"Failed to read {} account {:?} balance: {}. Relay may need to be stopped manually",
 						C::NAME,
 						account_id,
 						error,
 					);
-				}
+				},
 			};
 
 			env.sleep(conditions_check_delay::<C>()).await;
@@ -151,20 +164,24 @@ fn conditions_check_delay<C: Chain>() -> Duration {
 
 #[async_trait]
 impl<C: ChainWithBalances> Environment<C> for Client<C> {
-	async fn runtime_version(&mut self) -> Result<RuntimeVersion, String> {
-		Client::<C>::runtime_version(self).await.map_err(|e| e.to_string())
+	type Error = Error;
+
+	async fn runtime_version(&mut self) -> Result<RuntimeVersion, Self::Error> {
+		Client::<C>::runtime_version(self).await
 	}
 
-	async fn free_native_balance(&mut self, account: C::AccountId) -> Result<C::Balance, String> {
-		Client::<C>::free_native_balance(self, account)
-			.await
-			.map_err(|e| e.to_string())
+	async fn free_native_balance(
+		&mut self,
+		account: C::AccountId,
+	) -> Result<C::Balance, Self::Error> {
+		Client::<C>::free_native_balance(self, account).await
 	}
 }
 
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use frame_support::weights::IdentityFee;
 	use futures::{
 		channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender},
 		future::FutureExt,
@@ -180,18 +197,24 @@ mod tests {
 		type Hash = sp_core::H256;
 		type Hasher = sp_runtime::traits::BlakeTwo256;
 		type Header = sp_runtime::generic::Header<u32, sp_runtime::traits::BlakeTwo256>;
+
+		type AccountId = u32;
+		type Balance = u32;
+		type Index = u32;
+		type Signature = sp_runtime::testing::TestSignature;
 	}
 
 	impl Chain for TestChain {
 		const NAME: &'static str = "Test";
 		const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(1);
+		const STORAGE_PROOF_OVERHEAD: u32 = 0;
+		const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 0;
 
-		type AccountId = u32;
-		type Index = u32;
-		type SignedBlock =
-			sp_runtime::generic::SignedBlock<sp_runtime::generic::Block<Self::Header, sp_runtime::OpaqueExtrinsic>>;
+		type SignedBlock = sp_runtime::generic::SignedBlock<
+			sp_runtime::generic::Block<Self::Header, sp_runtime::OpaqueExtrinsic>,
+		>;
 		type Call = ();
-		type Balance = u32;
+		type WeightToFee = IdentityFee<u32>;
 	}
 
 	impl ChainWithBalances for TestChain {
@@ -209,11 +232,13 @@ mod tests {
 
 	#[async_trait]
 	impl Environment<TestChain> for TestEnvironment {
-		async fn runtime_version(&mut self) -> Result<RuntimeVersion, String> {
+		type Error = Error;
+
+		async fn runtime_version(&mut self) -> Result<RuntimeVersion, Self::Error> {
 			Ok(self.runtime_version_rx.next().await.unwrap_or_default())
 		}
 
-		async fn free_native_balance(&mut self, _account: u32) -> Result<u32, String> {
+		async fn free_native_balance(&mut self, _account: u32) -> Result<u32, Self::Error> {
 			Ok(self.free_native_balance_rx.next().await.unwrap_or_default())
 		}
 
@@ -249,10 +274,7 @@ mod tests {
 
 			// client responds with wrong version
 			runtime_version_tx
-				.send(RuntimeVersion {
-					spec_version: 42,
-					..Default::default()
-				})
+				.send(RuntimeVersion { spec_version: 42, ..Default::default() })
 				.await
 				.unwrap();
 
@@ -284,10 +306,7 @@ mod tests {
 
 			// client responds with the same version
 			runtime_version_tx
-				.send(RuntimeVersion {
-					spec_version: 42,
-					..Default::default()
-				})
+				.send(RuntimeVersion { spec_version: 42, ..Default::default() })
 				.await
 				.unwrap();
 
diff --git a/polkadot/bridges/relays/client-substrate/src/headers_source.rs b/polkadot/bridges/relays/client-substrate/src/headers_source.rs
index 3dfcb220de4530b38db1e7c3a0ca3b68feb51dff..e3839bf2c8ba103cd52335458797d688c7cb4525 100644
--- a/polkadot/bridges/relays/client-substrate/src/headers_source.rs
+++ b/polkadot/bridges/relays/client-substrate/src/headers_source.rs
@@ -16,9 +16,11 @@
 
 //! Default generic implementation of headers source for basic Substrate client.
 
-use crate::chain::{BlockWithJustification, Chain};
-use crate::client::Client;
-use crate::error::Error;
+use crate::{
+	chain::{BlockWithJustification, Chain},
+	client::Client,
+	error::Error,
+};
 
 use async_trait::async_trait;
 use headers_relay::{
@@ -38,19 +40,13 @@ pub struct HeadersSource<C: Chain, P> {
 impl<C: Chain, P> HeadersSource<C, P> {
 	/// Create new headers source using given client.
 	pub fn new(client: Client<C>) -> Self {
-		HeadersSource {
-			client,
-			_phantom: Default::default(),
-		}
+		HeadersSource { client, _phantom: Default::default() }
 	}
 }
 
 impl<C: Chain, P> Clone for HeadersSource<C, P> {
 	fn clone(&self) -> Self {
-		HeadersSource {
-			client: self.client.clone(),
-			_phantom: Default::default(),
-		}
+		HeadersSource { client: self.client.clone(), _phantom: Default::default() }
 	}
 }
 
@@ -69,7 +65,12 @@ where
 	C: Chain,
 	C::BlockNumber: relay_utils::BlockNumberBase,
 	C::Header: Into<P::Header>,
-	P: HeadersSyncPipeline<Extra = (), Completion = EncodedJustification, Hash = C::Hash, Number = C::BlockNumber>,
+	P: HeadersSyncPipeline<
+		Extra = (),
+		Completion = EncodedJustification,
+		Hash = C::Hash,
+		Number = C::BlockNumber,
+	>,
 	P::Header: SourceHeader<C::Hash, C::BlockNumber>,
 {
 	async fn best_block_number(&self) -> Result<P::Number, Error> {
@@ -79,22 +80,17 @@ where
 	}
 
 	async fn header_by_hash(&self, hash: P::Hash) -> Result<P::Header, Error> {
-		self.client
-			.header_by_hash(hash)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_hash(hash).await.map(Into::into).map_err(Into::into)
 	}
 
 	async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Error> {
-		self.client
-			.header_by_number(number)
-			.await
-			.map(Into::into)
-			.map_err(Into::into)
+		self.client.header_by_number(number).await.map(Into::into).map_err(Into::into)
 	}
 
-	async fn header_completion(&self, id: HeaderIdOf<P>) -> Result<(HeaderIdOf<P>, Option<P::Completion>), Error> {
+	async fn header_completion(
+		&self,
+		id: HeaderIdOf<P>,
+	) -> Result<(HeaderIdOf<P>, Option<P::Completion>), Error> {
 		let hash = id.1;
 		let signed_block = self.client.get_block(Some(hash)).await?;
 		let grandpa_justification = signed_block.justification().cloned();
@@ -102,7 +98,11 @@ where
 		Ok((id, grandpa_justification))
 	}
 
-	async fn header_extra(&self, id: HeaderIdOf<P>, _header: QueuedHeader<P>) -> Result<(HeaderIdOf<P>, ()), Error> {
+	async fn header_extra(
+		&self,
+		id: HeaderIdOf<P>,
+		_header: QueuedHeader<P>,
+	) -> Result<(HeaderIdOf<P>, ()), Error> {
 		Ok((id, ()))
 	}
 }
diff --git a/polkadot/bridges/relays/client-substrate/src/lib.rs b/polkadot/bridges/relays/client-substrate/src/lib.rs
index 44895dcdc6e4b95bb24e92f6a44d1b264d5e1d9b..1f6606ea287c3f599ac52014ec9d2194b1e0aa46 100644
--- a/polkadot/bridges/relays/client-substrate/src/lib.rs
+++ b/polkadot/bridges/relays/client-substrate/src/lib.rs
@@ -29,11 +29,21 @@ pub mod guard;
 pub mod headers_source;
 pub mod metrics;
 
-pub use crate::chain::{BlockWithJustification, Chain, ChainWithBalances, TransactionSignScheme};
-pub use crate::client::{Client, JustificationsSubscription, OpaqueGrandpaAuthoritiesSet};
-pub use crate::error::{Error, Result};
-pub use crate::sync_header::SyncHeader;
-pub use bp_runtime::{BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf};
+use std::time::Duration;
+
+pub use crate::{
+	chain::{
+		BlockWithJustification, CallOf, Chain, ChainWithBalances, TransactionSignScheme,
+		TransactionStatusOf, UnsignedTransaction, WeightToFeeOf,
+	},
+	client::{Client, OpaqueGrandpaAuthoritiesSet, Subscription},
+	error::{Error, Result},
+	sync_header::SyncHeader,
+};
+pub use bp_runtime::{
+	AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf,
+	IndexOf, SignatureOf, TransactionEra, TransactionEraOf,
+};
 
 /// Header id used by the chain.
 pub type HeaderIdOf<C> = relay_utils::HeaderId<HashOf<C>, BlockNumberOf<C>>;
@@ -41,7 +51,7 @@ pub type HeaderIdOf<C> = relay_utils::HeaderId<HashOf<C>, BlockNumberOf<C>>;
 /// Substrate-over-websocket connection params.
 #[derive(Debug, Clone)]
 pub struct ConnectionParams {
-	/// Websocket server hostname.
+	/// Websocket server host name.
 	pub host: String,
 	/// Websocket server TCP port.
 	pub port: u16,
@@ -51,10 +61,48 @@ pub struct ConnectionParams {
 
 impl Default for ConnectionParams {
 	fn default() -> Self {
-		ConnectionParams {
-			host: "localhost".into(),
-			port: 9944,
-			secure: false,
-		}
+		ConnectionParams { host: "localhost".into(), port: 9944, secure: false }
 	}
 }
+
+/// Returns stall timeout for relay loop.
+///
+/// Relay considers himself stalled if he has submitted transaction to the node, but it has not
+/// been mined for this period.
+pub fn transaction_stall_timeout(
+	mortality_period: Option<u32>,
+	average_block_interval: Duration,
+	default_stall_timeout: Duration,
+) -> Duration {
+	// 1 extra block for transaction to reach the pool && 1 for relayer to awake after it is mined
+	mortality_period
+		.map(|mortality_period| average_block_interval.saturating_mul(mortality_period + 1 + 1))
+		.unwrap_or(default_stall_timeout)
+}
+
+/// Returns stall timeout for relay loop that submit transactions to two chains.
+///
+/// Bidirectional relay may have two active transactions. Even if one of them has been spoiled, we
+/// can't just restart the loop - the other transaction may still be alive and we'll be submitting
+/// duplicate transaction, which may result in funds loss. So we'll be selecting maximal mortality
+/// for choosing loop stall timeout.
+pub fn bidirectional_transaction_stall_timeout(
+	left_mortality_period: Option<u32>,
+	right_mortality_period: Option<u32>,
+	left_average_block_interval: Duration,
+	right_average_block_interval: Duration,
+	default_stall_timeout: Duration,
+) -> Duration {
+	std::cmp::max(
+		transaction_stall_timeout(
+			left_mortality_period,
+			left_average_block_interval,
+			default_stall_timeout,
+		),
+		transaction_stall_timeout(
+			right_mortality_period,
+			right_average_block_interval,
+			default_stall_timeout,
+		),
+	)
+}
diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs
index f3ba8988eea4ab818ccbaf4447400296cd497454..f591a7a98105e1ea76e22ca72630670fd9ae8346 100644
--- a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs
+++ b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs
@@ -14,12 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::chain::Chain;
-use crate::client::Client;
+use crate::{chain::Chain, client::Client};
 
+use async_std::sync::{Arc, RwLock};
 use async_trait::async_trait;
 use codec::Decode;
-use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64};
+use relay_utils::metrics::{
+	metric_name, register, F64SharedRef, Gauge, PrometheusError, Registry, StandaloneMetrics, F64,
+};
 use sp_core::storage::StorageKey;
 use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber};
 use std::time::Duration;
@@ -34,6 +36,7 @@ pub struct FloatStorageValueMetric<C: Chain, T: Clone> {
 	storage_key: StorageKey,
 	maybe_default_value: Option<T>,
 	metric: Gauge<F64>,
+	shared_value_ref: F64SharedRef,
 }
 
 impl<C: Chain, T: Decode + FixedPointNumber> FloatStorageValueMetric<C, T> {
@@ -47,13 +50,20 @@ impl<C: Chain, T: Decode + FixedPointNumber> FloatStorageValueMetric<C, T> {
 		name: String,
 		help: String,
 	) -> Result<Self, PrometheusError> {
+		let shared_value_ref = Arc::new(RwLock::new(None));
 		Ok(FloatStorageValueMetric {
 			client,
 			storage_key,
 			maybe_default_value,
 			metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?,
+			shared_value_ref,
 		})
 	}
+
+	/// Get shared reference to metric value.
+	pub fn shared_value_ref(&self) -> F64SharedRef {
+		self.shared_value_ref.clone()
+	}
 }
 
 #[async_trait]
@@ -66,17 +76,18 @@ where
 	}
 
 	async fn update(&self) {
-		relay_utils::metrics::set_gauge_value(
-			&self.metric,
-			self.client
-				.storage_value::<T>(self.storage_key.clone())
-				.await
-				.map(|maybe_storage_value| {
-					maybe_storage_value.or(self.maybe_default_value).map(|storage_value| {
-						storage_value.into_inner().unique_saturated_into() as f64
-							/ T::DIV.unique_saturated_into() as f64
-					})
-				}),
-		);
+		let value = self
+			.client
+			.storage_value::<T>(self.storage_key.clone(), None)
+			.await
+			.map(|maybe_storage_value| {
+				maybe_storage_value.or(self.maybe_default_value).map(|storage_value| {
+					storage_value.into_inner().unique_saturated_into() as f64 /
+						T::DIV.unique_saturated_into() as f64
+				})
+			})
+			.map_err(drop);
+		relay_utils::metrics::set_gauge_value(&self.metric, value);
+		*self.shared_value_ref.write().await = value.ok().and_then(|x| x);
 	}
 }
diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs
index 526fe1e048bfcc9f2b77940e2f4829ae35d9c0fc..c3b69c32f5728dfd9b823d24c6410c0c14b7d6a2 100644
--- a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs
+++ b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs
@@ -14,12 +14,12 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::chain::Chain;
-use crate::client::Client;
-use crate::error::Error;
+use crate::{chain::Chain, client::Client, error::Error};
 
 use async_trait::async_trait;
-use relay_utils::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64};
+use relay_utils::metrics::{
+	metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, U64,
+};
 use sp_core::storage::StorageKey;
 use sp_runtime::traits::Header as HeaderT;
 use sp_storage::well_known_keys::CODE;
@@ -40,10 +40,7 @@ pub struct StorageProofOverheadMetric<C: Chain> {
 
 impl<C: Chain> Clone for StorageProofOverheadMetric<C> {
 	fn clone(&self) -> Self {
-		StorageProofOverheadMetric {
-			client: self.client.clone(),
-			metric: self.metric.clone(),
-		}
+		StorageProofOverheadMetric { client: self.client.clone(), metric: self.metric.clone() }
 	}
 }
 
@@ -73,15 +70,15 @@ impl<C: Chain> StorageProofOverheadMetric<C> {
 			.await?;
 		let storage_proof_size: usize = storage_proof.clone().iter_nodes().map(|n| n.len()).sum();
 
-		let storage_value_reader =
-			bp_runtime::StorageProofChecker::<C::Hasher>::new(*best_header.state_root(), storage_proof)
-				.map_err(Error::StorageProofError)?;
-		let maybe_encoded_storage_value = storage_value_reader
-			.read_value(CODE)
-			.map_err(Error::StorageProofError)?;
-		let encoded_storage_value_size = maybe_encoded_storage_value
-			.ok_or(Error::MissingMandatoryCodeEntry)?
-			.len();
+		let storage_value_reader = bp_runtime::StorageProofChecker::<C::Hasher>::new(
+			*best_header.state_root(),
+			storage_proof,
+		)
+		.map_err(Error::StorageProofError)?;
+		let maybe_encoded_storage_value =
+			storage_value_reader.read_value(CODE).map_err(Error::StorageProofError)?;
+		let encoded_storage_value_size =
+			maybe_encoded_storage_value.ok_or(Error::MissingMandatoryCodeEntry)?.len();
 
 		Ok(storage_proof_size - encoded_storage_value_size)
 	}
diff --git a/polkadot/bridges/relays/client-substrate/src/rpc.rs b/polkadot/bridges/relays/client-substrate/src/rpc.rs
index 06df1f705d093d01f2c67ef9c513861bb70fd5e6..efd45ebe43f36d102ee17f7f047f20a30e4c8c1a 100644
--- a/polkadot/bridges/relays/client-substrate/src/rpc.rs
+++ b/polkadot/bridges/relays/client-substrate/src/rpc.rs
@@ -18,11 +18,13 @@
 
 use crate::chain::Chain;
 
+use pallet_transaction_payment_rpc_runtime_api::FeeDetails;
 use sc_rpc_api::{state::ReadProof, system::Health};
 use sp_core::{
 	storage::{StorageData, StorageKey},
 	Bytes,
 };
+use sp_rpc::number::NumberOrHex;
 use sp_version::RuntimeVersion;
 
 jsonrpsee_proc_macros::rpc_client_api! {
@@ -41,13 +43,17 @@ jsonrpsee_proc_macros::rpc_client_api! {
 		fn system_account_next_index(account_id: C::AccountId) -> C::Index;
 		#[rpc(method = "author_submitExtrinsic", positional_params)]
 		fn author_submit_extrinsic(extrinsic: Bytes) -> C::Hash;
+		#[rpc(method = "author_pendingExtrinsics", positional_params)]
+		fn author_pending_extrinsics() -> Vec<Bytes>;
 		#[rpc(method = "state_call", positional_params)]
 		fn state_call(method: String, data: Bytes, at_block: Option<C::Hash>) -> Bytes;
 		#[rpc(method = "state_getStorage", positional_params)]
-		fn state_get_storage(key: StorageKey) -> Option<StorageData>;
+		fn state_get_storage(key: StorageKey, at_block: Option<C::Hash>) -> Option<StorageData>;
 		#[rpc(method = "state_getReadProof", positional_params)]
 		fn state_prove_storage(keys: Vec<StorageKey>, hash: Option<C::Hash>) -> ReadProof<C::Hash>;
 		#[rpc(method = "state_getRuntimeVersion", positional_params)]
 		fn state_runtime_version() -> RuntimeVersion;
+		#[rpc(method = "payment_queryFeeDetails", positional_params)]
+		fn payment_query_fee_details(extrinsic: Bytes, at_block: Option<C::Hash>) -> FeeDetails<NumberOrHex>;
 	}
 }
diff --git a/polkadot/bridges/relays/client-westend/Cargo.toml b/polkadot/bridges/relays/client-westend/Cargo.toml
index a408ae3a46daf7b0bd9a0e6d7b5ddd3a28c8f379..24b05c4f4836b2dab2fc1947273dfc081b98ec65 100644
--- a/polkadot/bridges/relays/client-westend/Cargo.toml
+++ b/polkadot/bridges/relays/client-westend/Cargo.toml
@@ -6,8 +6,7 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
 
@@ -17,9 +16,5 @@ bp-westend = { path = "../../primitives/chain-westend" }
 
 # Substrate Dependencies
 
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
-frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-westend/src/lib.rs b/polkadot/bridges/relays/client-westend/src/lib.rs
index 6768b81f10f8c63b73e0f43ac9c833097ae29c9d..c719d6ea55364be8c09d00220d82ec6681f7419d 100644
--- a/polkadot/bridges/relays/client-westend/src/lib.rs
+++ b/polkadot/bridges/relays/client-westend/src/lib.rs
@@ -16,10 +16,8 @@
 
 //! Types used to connect to the Westend chain.
 
-use codec::Encode;
-use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme};
-use sp_core::{storage::StorageKey, Pair};
-use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
+use relay_substrate_client::{Chain, ChainBase, ChainWithBalances};
+use sp_core::storage::StorageKey;
 use std::time::Duration;
 
 /// Westend header id.
@@ -37,17 +35,22 @@ impl ChainBase for Westend {
 	type Hash = bp_westend::Hash;
 	type Hasher = bp_westend::Hasher;
 	type Header = bp_westend::Header;
+
+	type AccountId = bp_westend::AccountId;
+	type Balance = bp_westend::Balance;
+	type Index = bp_westend::Nonce;
+	type Signature = bp_westend::Signature;
 }
 
 impl Chain for Westend {
 	const NAME: &'static str = "Westend";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_westend::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_westend::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = bp_westend::AccountId;
-	type Index = bp_westend::Nonce;
 	type SignedBlock = bp_westend::SignedBlock;
 	type Call = bp_westend::Call;
-	type Balance = bp_westend::Balance;
+	type WeightToFee = bp_westend::WeightToFee;
 }
 
 impl ChainWithBalances for Westend {
@@ -55,42 +58,3 @@ impl ChainWithBalances for Westend {
 		StorageKey(bp_westend::account_info_storage_key(account_id))
 	}
 }
-
-impl TransactionSignScheme for Westend {
-	type Chain = Westend;
-	type AccountKeyPair = sp_core::sr25519::Pair;
-	type SignedTransaction = bp_westend::UncheckedExtrinsic;
-
-	fn sign_transaction(
-		genesis_hash: <Self::Chain as ChainBase>::Hash,
-		signer: &Self::AccountKeyPair,
-		signer_nonce: <Self::Chain as Chain>::Index,
-		call: <Self::Chain as Chain>::Call,
-	) -> Self::SignedTransaction {
-		let raw_payload = SignedPayload::new(
-			call,
-			bp_westend::SignedExtensions::new(
-				bp_westend::VERSION,
-				sp_runtime::generic::Era::Immortal,
-				genesis_hash,
-				signer_nonce,
-				0,
-			),
-		)
-		.expect("SignedExtension never fails.");
-
-		let signature = raw_payload.using_encoded(|payload| signer.sign(payload));
-		let signer: sp_runtime::MultiSigner = signer.public().into();
-		let (call, extra, _) = raw_payload.deconstruct();
-
-		bp_westend::UncheckedExtrinsic::new_signed(
-			call,
-			sp_runtime::MultiAddress::Id(signer.into_account()),
-			signature.into(),
-			extra,
-		)
-	}
-}
-
-/// Westend signing params.
-pub type SigningParams = sp_core::sr25519::Pair;
diff --git a/polkadot/bridges/relays/client-wococo/Cargo.toml b/polkadot/bridges/relays/client-wococo/Cargo.toml
index c1b9aafd95e7c8d754faa781b4caa0e6939dbb5a..ea46c3c898bbbb3f22afd722242579a105d99105 100644
--- a/polkadot/bridges/relays/client-wococo/Cargo.toml
+++ b/polkadot/bridges/relays/client-wococo/Cargo.toml
@@ -6,10 +6,10 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
-codec = { package = "parity-scale-codec", version = "2.0.0" }
-headers-relay = { path = "../headers" }
+codec = { package = "parity-scale-codec", version = "2.2.0" }
 relay-substrate-client = { path = "../client-substrate" }
 relay-utils = { path = "../utils" }
+scale-info = { version = "1.0", default-features = false, features = ["derive"] }
 
 # Bridge dependencies
 bridge-runtime-common = { path = "../../bin/runtime-common" }
@@ -24,9 +24,6 @@ pallet-bridge-dispatch = { path = "../../modules/dispatch" }
 pallet-bridge-messages = { path = "../../modules/messages" }
 
 # Substrate Dependencies
-frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" }
 frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
-pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
-sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
 sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
diff --git a/polkadot/bridges/relays/client-wococo/src/lib.rs b/polkadot/bridges/relays/client-wococo/src/lib.rs
index 8ceba7c7c436d5d8cc0ab38c20387b39753a5698..d61915ec123708580ac117f4ffbabdeddddde0c8 100644
--- a/polkadot/bridges/relays/client-wococo/src/lib.rs
+++ b/polkadot/bridges/relays/client-wococo/src/lib.rs
@@ -17,7 +17,10 @@
 //! Types used to connect to the Wococo-Substrate chain.
 
 use codec::Encode;
-use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, TransactionSignScheme};
+use relay_substrate_client::{
+	Chain, ChainBase, ChainWithBalances, TransactionEraOf, TransactionSignScheme,
+	UnsignedTransaction,
+};
 use sp_core::{storage::StorageKey, Pair};
 use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount};
 use std::time::Duration;
@@ -39,17 +42,22 @@ impl ChainBase for Wococo {
 	type Hash = bp_wococo::Hash;
 	type Hasher = bp_wococo::Hashing;
 	type Header = bp_wococo::Header;
+
+	type AccountId = bp_wococo::AccountId;
+	type Balance = bp_wococo::Balance;
+	type Index = bp_wococo::Nonce;
+	type Signature = bp_wococo::Signature;
 }
 
 impl Chain for Wococo {
 	const NAME: &'static str = "Wococo";
 	const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6);
+	const STORAGE_PROOF_OVERHEAD: u32 = bp_wococo::EXTRA_STORAGE_PROOF_SIZE;
+	const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE;
 
-	type AccountId = bp_wococo::AccountId;
-	type Index = bp_wococo::Index;
 	type SignedBlock = bp_wococo::SignedBlock;
 	type Call = crate::runtime::Call;
-	type Balance = bp_wococo::Balance;
+	type WeightToFee = bp_wococo::WeightToFee;
 }
 
 impl ChainWithBalances for Wococo {
@@ -66,17 +74,17 @@ impl TransactionSignScheme for Wococo {
 	fn sign_transaction(
 		genesis_hash: <Self::Chain as ChainBase>::Hash,
 		signer: &Self::AccountKeyPair,
-		signer_nonce: <Self::Chain as Chain>::Index,
-		call: <Self::Chain as Chain>::Call,
+		era: TransactionEraOf<Self::Chain>,
+		unsigned: UnsignedTransaction<Self::Chain>,
 	) -> Self::SignedTransaction {
 		let raw_payload = SignedPayload::new(
-			call,
+			unsigned.call,
 			bp_wococo::SignedExtensions::new(
 				bp_wococo::VERSION,
-				sp_runtime::generic::Era::Immortal,
+				era,
 				genesis_hash,
-				signer_nonce,
-				0,
+				unsigned.nonce,
+				unsigned.tip,
 			),
 		)
 		.expect("SignedExtension never fails.");
@@ -92,6 +100,24 @@ impl TransactionSignScheme for Wococo {
 			extra,
 		)
 	}
+
+	fn is_signed(tx: &Self::SignedTransaction) -> bool {
+		tx.signature.is_some()
+	}
+
+	fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool {
+		tx.signature
+			.as_ref()
+			.map(|(address, _, _)| {
+				*address == bp_wococo::AccountId::from(*signer.public().as_array_ref()).into()
+			})
+			.unwrap_or(false)
+	}
+
+	fn parse_transaction(tx: Self::SignedTransaction) -> Option<UnsignedTransaction<Self::Chain>> {
+		let extra = &tx.signature.as_ref()?.2;
+		Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() })
+	}
 }
 
 /// Wococo signing params.
diff --git a/polkadot/bridges/relays/client-wococo/src/runtime.rs b/polkadot/bridges/relays/client-wococo/src/runtime.rs
index e973c3a6d028055e5767b8987e07664d26b3be88..91d32d1aa76f71a9dc7aefe51bbcdd662053e3f7 100644
--- a/polkadot/bridges/relays/client-wococo/src/runtime.rs
+++ b/polkadot/bridges/relays/client-wococo/src/runtime.rs
@@ -21,9 +21,7 @@ use bp_polkadot_core::PolkadotLike;
 use bp_runtime::Chain;
 use codec::{Decode, Encode};
 use frame_support::weights::Weight;
-
-/// Instance of messages pallet that is used to bridge with Rococo chain.
-pub type WithRococoMessagesInstance = pallet_bridge_messages::DefaultInstance;
+use scale_info::TypeInfo;
 
 /// Unchecked Wococo extrinsic.
 pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic<Call>;
@@ -55,15 +53,15 @@ where
 /// Wococo Runtime `Call` enum.
 ///
 /// The enum represents a subset of possible `Call`s we can send to Rococo chain.
-/// Ideally this code would be auto-generated from Metadata, because we want to
+/// Ideally this code would be auto-generated from metadata, because we want to
 /// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s.
 ///
 /// All entries here (like pretty much in the entire file) must be kept in sync with Rococo
 /// `construct_runtime`, so that we maintain SCALE-compatibility.
 ///
-/// See: https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs
+/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs)
 #[allow(clippy::large_enum_variant)]
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 pub enum Call {
 	/// System pallet.
 	#[codec(index = 0)]
@@ -76,26 +74,26 @@ pub enum Call {
 	BridgeMessagesRococo(BridgeMessagesRococoCall),
 }
 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 #[allow(non_camel_case_types)]
 pub enum SystemCall {
 	#[codec(index = 1)]
 	remark(Vec<u8>),
 }
 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 #[allow(non_camel_case_types)]
 pub enum BridgeGrandpaRococoCall {
 	#[codec(index = 0)]
 	submit_finality_proof(
-		<PolkadotLike as Chain>::Header,
+		Box<<PolkadotLike as Chain>::Header>,
 		bp_header_chain::justification::GrandpaJustification<<PolkadotLike as Chain>::Header>,
 	),
 	#[codec(index = 1)]
 	initialize(bp_header_chain::InitializationData<<PolkadotLike as Chain>::Header>),
 }
 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone)]
+#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)]
 #[allow(non_camel_case_types)]
 pub enum BridgeMessagesRococoCall {
 	#[codec(index = 3)]
@@ -118,7 +116,9 @@ pub enum BridgeMessagesRococoCall {
 	),
 	#[codec(index = 6)]
 	receive_messages_delivery_proof(
-		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<bp_rococo::Hash>,
+		bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof<
+			bp_rococo::Hash,
+		>,
 		UnrewardedRelayersState,
 	),
 }
diff --git a/polkadot/bridges/relays/exchange/Cargo.toml b/polkadot/bridges/relays/exchange/Cargo.toml
index 62e7a029bbb2c996f49a2bfae213e9434ae07901..f08c40325ec7146e6bf76acf2c141d2c91fc7d13 100644
--- a/polkadot/bridges/relays/exchange/Cargo.toml
+++ b/polkadot/bridges/relays/exchange/Cargo.toml
@@ -6,6 +6,7 @@ edition = "2018"
 license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
+anyhow = "1.0"
 async-std = "1.6.5"
 async-trait = "0.1.40"
 backoff = "0.2"
@@ -14,3 +15,4 @@ log = "0.4.11"
 num-traits = "0.2"
 parking_lot = "0.11.0"
 relay-utils = { path = "../utils" }
+thiserror = "1.0.26"
diff --git a/polkadot/bridges/relays/exchange/src/error.rs b/polkadot/bridges/relays/exchange/src/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..aa5c427a9efbd1c2555b24c11683c210e0445a97
--- /dev/null
+++ b/polkadot/bridges/relays/exchange/src/error.rs
@@ -0,0 +1,66 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Exchange-relay errors.
+
+use crate::exchange::{BlockHashOf, BlockNumberOf, TransactionHashOf};
+
+use relay_utils::MaybeConnectionError;
+use std::fmt::{Debug, Display};
+use thiserror::Error;
+
+/// Error type given pipeline.
+pub type ErrorOf<P> = Error<BlockHashOf<P>, BlockNumberOf<P>, TransactionHashOf<P>>;
+
+/// Exchange-relay error type.
+#[derive(Error, Debug)]
+pub enum Error<Hash: Display, HeaderNumber: Display, SourceTxHash: Display> {
+	/// Failed to check finality of the requested header on the target node.
+	#[error("Failed to check finality of header {0}/{1} on {2} node: {3:?}")]
+	Finality(HeaderNumber, Hash, &'static str, anyhow::Error),
+	/// Error retrieving block from the source node.
+	#[error("Error retrieving block {0} from {1} node: {2:?}")]
+	RetrievingBlock(Hash, &'static str, anyhow::Error),
+	/// Error retrieving transaction from the source node.
+	#[error("Error retrieving transaction {0} from {1} node: {2:?}")]
+	RetrievingTransaction(SourceTxHash, &'static str, anyhow::Error),
+	/// Failed to check existence of header from the target node.
+	#[error("Failed to check existence of header {0}/{1} on {2} node: {3:?}")]
+	CheckHeaderExistence(HeaderNumber, Hash, &'static str, anyhow::Error),
+	/// Failed to prepare proof for the transaction from the source node.
+	#[error("Error building transaction {0} proof on {1} node: {2:?}")]
+	BuildTransactionProof(String, &'static str, anyhow::Error, bool),
+	/// Failed to submit the transaction proof to the target node.
+	#[error("Error submitting transaction {0} proof to {1} node: {2:?}")]
+	SubmitTransactionProof(String, &'static str, anyhow::Error, bool),
+	/// Transaction filtering failed.
+	#[error("Transaction filtering has failed with {0:?}")]
+	TransactionFiltering(anyhow::Error, bool),
+	/// Utilities/metrics error.
+	#[error("{0}")]
+	Utils(#[from] relay_utils::Error),
+}
+
+impl<T: Display, U: Display, V: Display> MaybeConnectionError for Error<T, U, V> {
+	fn is_connection_error(&self) -> bool {
+		match *self {
+			Self::BuildTransactionProof(_, _, _, b) => b,
+			Self::SubmitTransactionProof(_, _, _, b) => b,
+			Self::TransactionFiltering(_, b) => b,
+			_ => false,
+		}
+	}
+}
diff --git a/polkadot/bridges/relays/exchange/src/exchange.rs b/polkadot/bridges/relays/exchange/src/exchange.rs
index 7128a0ccd097b582112b787a6ef0cae774c26022..b8ecb018ec08fafb2d8399fc960e1bccc41c343e 100644
--- a/polkadot/bridges/relays/exchange/src/exchange.rs
+++ b/polkadot/bridges/relays/exchange/src/exchange.rs
@@ -16,10 +16,11 @@
 
 //! Relaying proofs of exchange transaction.
 
+use crate::error::{Error, ErrorOf};
+
+use anyhow::anyhow;
 use async_trait::async_trait;
-use relay_utils::{
-	relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, StringifiedMaybeConnectionError,
-};
+use relay_utils::{relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError};
 use std::{
 	fmt::{Debug, Display},
 	string::ToString,
@@ -66,7 +67,7 @@ pub trait SourceBlock: 'static + Send + Sync {
 /// Transaction that is participating in exchange.
 pub trait SourceTransaction: 'static + Send {
 	/// Transaction hash type.
-	type Hash: Debug + Display;
+	type Hash: Debug + Display + Clone;
 
 	/// Return transaction hash.
 	fn hash(&self) -> Self::Hash;
@@ -96,12 +97,18 @@ pub trait SourceClient<P: TransactionProofPipeline>: RelayClient {
 	async fn block_by_hash(&self, hash: BlockHashOf<P>) -> Result<P::Block, Self::Error>;
 	/// Get canonical block by number.
 	async fn block_by_number(&self, number: BlockNumberOf<P>) -> Result<P::Block, Self::Error>;
-	/// Return block + index where transaction has been **mined**. May return `Ok(None)` if transaction
-	/// is unknown to the source node.
-	async fn transaction_block(&self, hash: &TransactionHashOf<P>)
-		-> Result<Option<(HeaderId<P>, usize)>, Self::Error>;
+	/// Return block + index where transaction has been **mined**. May return `Ok(None)` if
+	/// transaction is unknown to the source node.
+	async fn transaction_block(
+		&self,
+		hash: &TransactionHashOf<P>,
+	) -> Result<Option<(HeaderId<P>, usize)>, Self::Error>;
 	/// Prepare transaction proof.
-	async fn transaction_proof(&self, block: &P::Block, tx_index: usize) -> Result<P::TransactionProof, Self::Error>;
+	async fn transaction_proof(
+		&self,
+		block: &P::Block,
+		tx_index: usize,
+	) -> Result<P::TransactionProof, Self::Error>;
 }
 
 /// Target client API.
@@ -110,15 +117,19 @@ pub trait TargetClient<P: TransactionProofPipeline>: RelayClient {
 	/// Sleep until exchange-related data is (probably) updated.
 	async fn tick(&self);
 	/// Returns `Ok(true)` if header is known to the target node.
-	async fn is_header_known(&self, id: &HeaderId<P>) -> Result<bool, Self::Error>;
+	async fn is_header_known(&self, id: &HeaderId<P>) -> std::result::Result<bool, Self::Error>;
 	/// Returns `Ok(true)` if header is finalized by the target node.
 	async fn is_header_finalized(&self, id: &HeaderId<P>) -> Result<bool, Self::Error>;
 	/// Returns best finalized header id.
 	async fn best_finalized_header_id(&self) -> Result<HeaderId<P>, Self::Error>;
 	/// Returns `Ok(true)` if transaction proof is need to be relayed.
-	async fn filter_transaction_proof(&self, proof: &P::TransactionProof) -> Result<bool, Self::Error>;
+	async fn filter_transaction_proof(
+		&self,
+		proof: &P::TransactionProof,
+	) -> Result<bool, Self::Error>;
 	/// Submits transaction proof to the target node.
-	async fn submit_transaction_proof(&self, proof: P::TransactionProof) -> Result<(), Self::Error>;
+	async fn submit_transaction_proof(&self, proof: P::TransactionProof)
+		-> Result<(), Self::Error>;
 }
 
 /// Block transaction statistics.
@@ -154,27 +165,28 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 	for (source_tx_index, source_tx) in transactions_to_process {
 		let result = async {
 			let source_tx_id = format!("{}/{}", source_block.id().1, source_tx_index);
-			let source_tx_proof =
-				prepare_transaction_proof(source_client, &source_tx_id, source_block, source_tx_index)
-					.await
-					.map_err(|e| (FailedClient::Source, e))?;
+			let source_tx_proof = prepare_transaction_proof(
+				source_client,
+				&source_tx_id,
+				source_block,
+				source_tx_index,
+			)
+			.await
+			.map_err(|e| (FailedClient::Source, e))?;
 
 			let needs_to_be_relayed =
-				target_client
-					.filter_transaction_proof(&source_tx_proof)
-					.await
-					.map_err(|err| {
-						(
-							FailedClient::Target,
-							StringifiedMaybeConnectionError::new(
-								err.is_connection_error(),
-								format!("Transaction filtering has failed with {:?}", err),
-							),
-						)
-					})?;
+				target_client.filter_transaction_proof(&source_tx_proof).await.map_err(|err| {
+					(
+						FailedClient::Target,
+						Error::TransactionFiltering(
+							anyhow!("{:?}", err),
+							err.is_connection_error(),
+						),
+					)
+				})?;
 
 			if !needs_to_be_relayed {
-				return Ok(false);
+				return Ok(false)
 			}
 
 			relay_ready_transaction_proof(target_client, &source_tx_id, source_tx_proof)
@@ -191,13 +203,14 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 		// Option#1 may seems better, but:
 		// 1) we do not track if transaction is mined (without an error) by the target node;
 		// 2) error could be irrecoverable (e.g. when block is already pruned by bridge module or tx
-		//    has invalid format) && we'll end up in infinite loop of retrying the same transaction proof.
+		//    has invalid format) && we'll end up in infinite loop of retrying the same transaction
+		// proof.
 		//
 		// So we're going with option#2 here (the only exception are connection errors).
 		match result {
 			Ok(false) => {
 				relayed_transactions.processed += 1;
-			}
+			},
 			Ok(true) => {
 				log::info!(
 					target: "bridge",
@@ -209,7 +222,7 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 
 				relayed_transactions.processed += 1;
 				relayed_transactions.relayed += 1;
-			}
+			},
 			Err((failed_client, err)) => {
 				log::error!(
 					target: "bridge",
@@ -226,12 +239,12 @@ pub async fn relay_block_transactions<P: TransactionProofPipeline>(
 				);
 
 				if err.is_connection_error() {
-					return Err((failed_client, relayed_transactions));
+					return Err((failed_client, relayed_transactions))
 				}
 
 				relayed_transactions.processed += 1;
 				relayed_transactions.failed += 1;
-			}
+			},
 		}
 	}
 
@@ -243,19 +256,14 @@ pub async fn relay_single_transaction_proof<P: TransactionProofPipeline>(
 	source_client: &impl SourceClient<P>,
 	target_client: &impl TargetClient<P>,
 	source_tx_hash: TransactionHashOf<P>,
-) -> Result<(), String> {
+) -> Result<(), ErrorOf<P>> {
 	// wait for transaction and header on source node
-	let (source_header_id, source_tx_index) = wait_transaction_mined(source_client, &source_tx_hash).await?;
+	let (source_header_id, source_tx_index) =
+		wait_transaction_mined(source_client, &source_tx_hash).await?;
 	let source_block = source_client.block_by_hash(source_header_id.1.clone()).await;
 	let source_block = source_block.map_err(|err| {
-		format!(
-			"Error retrieving block {} from {} node: {:?}",
-			source_header_id.1,
-			P::SOURCE_NAME,
-			err,
-		)
+		Error::RetrievingBlock(source_header_id.1.clone(), P::SOURCE_NAME, anyhow!("{:?}", err))
 	})?;
-
 	// wait for transaction and header on target node
 	wait_header_imported(target_client, &source_header_id).await?;
 	wait_header_finalized(target_client, &source_header_id).await?;
@@ -266,11 +274,10 @@ pub async fn relay_single_transaction_proof<P: TransactionProofPipeline>(
 		target_client,
 		&source_tx_id,
 		prepare_transaction_proof(source_client, &source_tx_id, &source_block, source_tx_index)
-			.await
-			.map_err(|err| err.to_string())?,
+			.await?,
 	)
 	.await
-	.map_err(|err| err.to_string())
+	.map_err(Into::into)
 }
 
 /// Prepare transaction proof.
@@ -279,19 +286,16 @@ async fn prepare_transaction_proof<P: TransactionProofPipeline>(
 	source_tx_id: &str,
 	source_block: &P::Block,
 	source_tx_index: usize,
-) -> Result<P::TransactionProof, StringifiedMaybeConnectionError> {
+) -> Result<P::TransactionProof, ErrorOf<P>> {
 	source_client
 		.transaction_proof(source_block, source_tx_index)
 		.await
 		.map_err(|err| {
-			StringifiedMaybeConnectionError::new(
+			Error::BuildTransactionProof(
+				source_tx_id.to_owned(),
+				P::SOURCE_NAME,
+				anyhow!("{:?}", err),
 				err.is_connection_error(),
-				format!(
-					"Error building transaction {} proof on {} node: {:?}",
-					source_tx_id,
-					P::SOURCE_NAME,
-					err,
-				),
 			)
 		})
 }
@@ -301,37 +305,31 @@ async fn relay_ready_transaction_proof<P: TransactionProofPipeline>(
 	target_client: &impl TargetClient<P>,
 	source_tx_id: &str,
 	source_tx_proof: P::TransactionProof,
-) -> Result<(), StringifiedMaybeConnectionError> {
-	target_client
-		.submit_transaction_proof(source_tx_proof)
-		.await
-		.map_err(|err| {
-			StringifiedMaybeConnectionError::new(
-				err.is_connection_error(),
-				format!(
-					"Error submitting transaction {} proof to {} node: {:?}",
-					source_tx_id,
-					P::TARGET_NAME,
-					err,
-				),
-			)
-		})
+) -> Result<(), ErrorOf<P>> {
+	target_client.submit_transaction_proof(source_tx_proof).await.map_err(|err| {
+		Error::SubmitTransactionProof(
+			source_tx_id.to_owned(),
+			P::TARGET_NAME,
+			anyhow!("{:?}", err),
+			err.is_connection_error(),
+		)
+	})
 }
 
 /// Wait until transaction is mined by source node.
 async fn wait_transaction_mined<P: TransactionProofPipeline>(
 	source_client: &impl SourceClient<P>,
 	source_tx_hash: &TransactionHashOf<P>,
-) -> Result<(HeaderId<P>, usize), String> {
+) -> Result<(HeaderId<P>, usize), ErrorOf<P>> {
 	loop {
-		let source_header_and_tx = source_client.transaction_block(source_tx_hash).await.map_err(|err| {
-			format!(
-				"Error retrieving transaction {} from {} node: {:?}",
-				source_tx_hash,
-				P::SOURCE_NAME,
-				err,
-			)
-		})?;
+		let source_header_and_tx =
+			source_client.transaction_block(source_tx_hash).await.map_err(|err| {
+				Error::RetrievingTransaction(
+					source_tx_hash.clone(),
+					P::SOURCE_NAME,
+					anyhow!("{:?}", err),
+				)
+			})?;
 		match source_header_and_tx {
 			Some((source_header_id, source_tx)) => {
 				log::info!(
@@ -341,8 +339,8 @@ async fn wait_transaction_mined<P: TransactionProofPipeline>(
 					P::SOURCE_NAME,
 				);
 
-				return Ok((source_header_id, source_tx));
-			}
+				return Ok((source_header_id, source_tx))
+			},
 			None => {
 				log::info!(
 					target: "bridge",
@@ -352,7 +350,7 @@ async fn wait_transaction_mined<P: TransactionProofPipeline>(
 				);
 
 				source_client.tick().await;
-			}
+			},
 		}
 	}
 }
@@ -361,17 +359,17 @@ async fn wait_transaction_mined<P: TransactionProofPipeline>(
 async fn wait_header_imported<P: TransactionProofPipeline>(
 	target_client: &impl TargetClient<P>,
 	source_header_id: &HeaderId<P>,
-) -> Result<(), String> {
+) -> Result<(), ErrorOf<P>> {
 	loop {
-		let is_header_known = target_client.is_header_known(source_header_id).await.map_err(|err| {
-			format!(
-				"Failed to check existence of header {}/{} on {} node: {:?}",
-				source_header_id.0,
-				source_header_id.1,
-				P::TARGET_NAME,
-				err,
-			)
-		})?;
+		let is_header_known =
+			target_client.is_header_known(source_header_id).await.map_err(|err| {
+				Error::CheckHeaderExistence(
+					source_header_id.0,
+					source_header_id.1.clone(),
+					P::TARGET_NAME,
+					anyhow!("{:?}", err),
+				)
+			})?;
 		match is_header_known {
 			true => {
 				log::info!(
@@ -382,8 +380,8 @@ async fn wait_header_imported<P: TransactionProofPipeline>(
 					P::TARGET_NAME,
 				);
 
-				return Ok(());
-			}
+				return Ok(())
+			},
 			false => {
 				log::info!(
 					target: "bridge",
@@ -394,7 +392,7 @@ async fn wait_header_imported<P: TransactionProofPipeline>(
 				);
 
 				target_client.tick().await;
-			}
+			},
 		}
 	}
 }
@@ -403,18 +401,15 @@ async fn wait_header_imported<P: TransactionProofPipeline>(
 async fn wait_header_finalized<P: TransactionProofPipeline>(
 	target_client: &impl TargetClient<P>,
 	source_header_id: &HeaderId<P>,
-) -> Result<(), String> {
+) -> Result<(), ErrorOf<P>> {
 	loop {
-		let is_header_finalized = target_client
-			.is_header_finalized(source_header_id)
-			.await
-			.map_err(|err| {
-				format!(
-					"Failed to check finality of header {}/{} on {} node: {:?}",
+		let is_header_finalized =
+			target_client.is_header_finalized(source_header_id).await.map_err(|err| {
+				Error::Finality(
 					source_header_id.0,
-					source_header_id.1,
+					source_header_id.1.clone(),
 					P::TARGET_NAME,
-					err,
+					anyhow!("{:?}", err),
 				)
 			})?;
 		match is_header_finalized {
@@ -427,8 +422,8 @@ async fn wait_header_finalized<P: TransactionProofPipeline>(
 					P::TARGET_NAME,
 				);
 
-				return Ok(());
-			}
+				return Ok(())
+			},
 			false => {
 				log::info!(
 					target: "bridge",
@@ -439,7 +434,7 @@ async fn wait_header_finalized<P: TransactionProofPipeline>(
 				);
 
 				target_client.tick().await;
-			}
+			},
 		}
 	}
 }
@@ -582,15 +577,22 @@ pub(crate) mod tests {
 			self.data.lock().block.clone()
 		}
 
-		async fn transaction_block(&self, _: &TestTransactionHash) -> Result<Option<(TestHeaderId, usize)>, TestError> {
+		async fn transaction_block(
+			&self,
+			_: &TestTransactionHash,
+		) -> Result<Option<(TestHeaderId, usize)>, TestError> {
 			self.data.lock().transaction_block.clone()
 		}
 
-		async fn transaction_proof(&self, block: &TestBlock, index: usize) -> Result<TestTransactionProof, TestError> {
+		async fn transaction_proof(
+			&self,
+			block: &TestBlock,
+			index: usize,
+		) -> Result<TestTransactionProof, TestError> {
 			let tx_hash = block.1[index].hash();
 			let proof_error = self.data.lock().proofs_to_fail.get(&tx_hash).cloned();
 			if let Some(err) = proof_error {
-				return Err(err);
+				return Err(err)
 			}
 
 			Ok(TestTransactionProof(tx_hash))
@@ -653,21 +655,32 @@ pub(crate) mod tests {
 			self.data.lock().best_finalized_header_id.clone()
 		}
 
-		async fn filter_transaction_proof(&self, proof: &TestTransactionProof) -> Result<bool, TestError> {
+		async fn filter_transaction_proof(
+			&self,
+			proof: &TestTransactionProof,
+		) -> Result<bool, TestError> {
 			Ok(self.data.lock().transactions_to_accept.contains(&proof.0))
 		}
 
-		async fn submit_transaction_proof(&self, proof: TestTransactionProof) -> Result<(), TestError> {
+		async fn submit_transaction_proof(
+			&self,
+			proof: TestTransactionProof,
+		) -> Result<(), TestError> {
 			self.data.lock().submitted_proofs.push(proof);
 			Ok(())
 		}
 	}
 
-	fn ensure_relay_single_success(source: &TestTransactionsSource, target: &TestTransactionsTarget) {
-		assert_eq!(
-			async_std::task::block_on(relay_single_transaction_proof(source, target, test_transaction_hash(0),)),
-			Ok(()),
-		);
+	fn ensure_relay_single_success(
+		source: &TestTransactionsSource,
+		target: &TestTransactionsTarget,
+	) {
+		assert!(async_std::task::block_on(relay_single_transaction_proof(
+			source,
+			target,
+			test_transaction_hash(0)
+		))
+		.is_ok());
 		assert_eq!(
 			target.data.lock().submitted_proofs,
 			vec![TestTransactionProof(test_transaction_hash(0))],
@@ -782,11 +795,7 @@ pub(crate) mod tests {
 		let source = TestTransactionsSource::new(Box::new(|_| unreachable!("no ticks allowed")));
 		let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed")));
 
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.remove(&test_transaction_hash(0));
+		target.data.lock().transactions_to_accept.remove(&test_transaction_hash(0));
 
 		ensure_relay_single_success(&source, &target)
 	}
@@ -814,25 +823,14 @@ pub(crate) mod tests {
 		let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no ticks allowed")));
 
 		// let's only accept tx#1
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.remove(&test_transaction_hash(0));
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.insert(test_transaction_hash(1));
+		target.data.lock().transactions_to_accept.remove(&test_transaction_hash(0));
+		target.data.lock().transactions_to_accept.insert(test_transaction_hash(1));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, Default::default());
 		assert_eq!(
 			relayed_transactions,
-			Ok(RelayedBlockTransactions {
-				processed: 3,
-				relayed: 1,
-				failed: 0,
-			}),
+			Ok(RelayedBlockTransactions { processed: 3, relayed: 1, failed: 0 }),
 		);
 		assert_eq!(
 			target.data.lock().submitted_proofs,
@@ -852,14 +850,11 @@ pub(crate) mod tests {
 			.proofs_to_fail
 			.insert(test_transaction_hash(0), TestError(false));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, Default::default());
 		assert_eq!(
 			relayed_transactions,
-			Ok(RelayedBlockTransactions {
-				processed: 3,
-				relayed: 0,
-				failed: 1,
-			}),
+			Ok(RelayedBlockTransactions { processed: 3, relayed: 0, failed: 1 }),
 		);
 		assert_eq!(target.data.lock().submitted_proofs, vec![]);
 	}
@@ -876,14 +871,11 @@ pub(crate) mod tests {
 			.proofs_to_fail
 			.insert(test_transaction_hash(1), TestError(true));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, Default::default());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, Default::default());
 		assert_eq!(
 			relayed_transactions,
-			Err(RelayedBlockTransactions {
-				processed: 1,
-				relayed: 1,
-				failed: 0,
-			}),
+			Err(RelayedBlockTransactions { processed: 1, relayed: 1, failed: 0 }),
 		);
 		assert_eq!(
 			target.data.lock().submitted_proofs,
@@ -893,20 +885,13 @@ pub(crate) mod tests {
 		// now do not fail on tx#2
 		source.data.lock().proofs_to_fail.clear();
 		// and also relay tx#3
-		target
-			.data
-			.lock()
-			.transactions_to_accept
-			.insert(test_transaction_hash(2));
+		target.data.lock().transactions_to_accept.insert(test_transaction_hash(2));
 
-		let relayed_transactions = test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err());
+		let relayed_transactions =
+			test_relay_block_transactions(&source, &target, relayed_transactions.unwrap_err());
 		assert_eq!(
 			relayed_transactions,
-			Ok(RelayedBlockTransactions {
-				processed: 3,
-				relayed: 2,
-				failed: 0,
-			}),
+			Ok(RelayedBlockTransactions { processed: 3, relayed: 2, failed: 0 }),
 		);
 		assert_eq!(
 			target.data.lock().submitted_proofs,
diff --git a/polkadot/bridges/relays/exchange/src/exchange_loop.rs b/polkadot/bridges/relays/exchange/src/exchange_loop.rs
index 8da4c3f45687da8ee2b561ce7a301e680ddbb3d9..84d216f43968f8fc9f8898cfdd30208483d4f6ec 100644
--- a/polkadot/bridges/relays/exchange/src/exchange_loop.rs
+++ b/polkadot/bridges/relays/exchange/src/exchange_loop.rs
@@ -16,12 +16,16 @@
 
 //! Relaying proofs of exchange transactions.
 
-use crate::exchange::{
-	relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient, TargetClient,
-	TransactionProofPipeline,
+use crate::{
+	error::Error,
+	exchange::{
+		relay_block_transactions, BlockNumberOf, RelayedBlockTransactions, SourceClient,
+		TargetClient, TransactionProofPipeline,
+	},
+	exchange_loop_metrics::ExchangeLoopMetrics,
 };
-use crate::exchange_loop_metrics::ExchangeLoopMetrics;
 
+use crate::error::ErrorOf;
 use backoff::backoff::Backoff;
 use futures::{future::FutureExt, select};
 use num_traits::One;
@@ -58,13 +62,13 @@ pub struct InMemoryStorage<BlockNumber> {
 impl<BlockNumber> InMemoryStorage<BlockNumber> {
 	/// Created new in-memory storage with given best processed block number.
 	pub fn new(best_processed_header_number: BlockNumber) -> Self {
-		InMemoryStorage {
-			best_processed_header_number,
-		}
+		InMemoryStorage { best_processed_header_number }
 	}
 }
 
-impl<BlockNumber: 'static + Clone + Copy + Send + Sync> TransactionProofsRelayStorage for InMemoryStorage<BlockNumber> {
+impl<BlockNumber: 'static + Clone + Copy + Send + Sync> TransactionProofsRelayStorage
+	for InMemoryStorage<BlockNumber>
+{
 	type BlockNumber = BlockNumber;
 
 	fn state(&self) -> TransactionProofsRelayState<BlockNumber> {
@@ -90,13 +94,13 @@ pub async fn run<P: TransactionProofPipeline>(
 	target_client: impl TargetClient<P>,
 	metrics_params: MetricsParams,
 	exit_signal: impl Future<Output = ()> + 'static + Send,
-) -> Result<(), String> {
+) -> Result<(), ErrorOf<P>> {
 	let exit_signal = exit_signal.shared();
 
 	relay_utils::relay_loop(source_client, target_client)
 		.with_metrics(Some(metrics_prefix::<P>()), metrics_params)
-		.loop_metric(|registry, prefix| ExchangeLoopMetrics::new(registry, prefix))?
-		.standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))?
+		.loop_metric(ExchangeLoopMetrics::new)?
+		.standalone_metric(GlobalMetrics::new)?
 		.expose()
 		.await?
 		.run(metrics_prefix::<P>(), move |source_client, target_client, metrics| {
@@ -109,6 +113,7 @@ pub async fn run<P: TransactionProofPipeline>(
 			)
 		})
 		.await
+		.map_err(Error::Utils)
 }
 
 /// Run proofs synchronization.
@@ -140,12 +145,11 @@ async fn run_until_connection_lost<P: TransactionProofPipeline>(
 
 		if let Err((is_connection_error, failed_client)) = iteration_result {
 			if is_connection_error {
-				return Err(failed_client);
+				return Err(failed_client)
 			}
 
-			let retry_timeout = retry_backoff
-				.next_backoff()
-				.unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY);
+			let retry_timeout =
+				retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY);
 			select! {
 				_ = async_std::task::sleep(retry_timeout).fuse() => {},
 				_ = exit_signal => return Ok(()),
@@ -181,7 +185,7 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 			);
 
 			best_finalized_header_id
-		}
+		},
 		Err(err) => {
 			log::error!(
 				target: "bridge",
@@ -191,14 +195,20 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 				err,
 			);
 
-			return Err((err.is_connection_error(), FailedClient::Target));
-		}
+			return Err((err.is_connection_error(), FailedClient::Target))
+		},
 	};
 
 	loop {
 		// if we already have some finalized block body, try to relay its transactions
 		if let Some((block, relayed_transactions)) = current_finalized_block.take() {
-			let result = relay_block_transactions(source_client, target_client, &block, relayed_transactions).await;
+			let result = relay_block_transactions(
+				source_client,
+				target_client,
+				&block,
+				relayed_transactions,
+			)
+			.await;
 
 			match result {
 				Ok(relayed_transactions) => {
@@ -212,7 +222,8 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 						relayed_transactions.failed,
 					);
 
-					state.best_processed_header_number = state.best_processed_header_number + One::one();
+					state.best_processed_header_number =
+						state.best_processed_header_number + One::one();
 					storage.set_state(state);
 
 					if let Some(exchange_loop_metrics) = exchange_loop_metrics {
@@ -224,11 +235,11 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 					}
 
 					// we have just updated state => proceed to next block retrieval
-				}
+				},
 				Err((failed_client, relayed_transactions)) => {
 					*current_finalized_block = Some((block, relayed_transactions));
-					return Err((true, failed_client));
-				}
+					return Err((true, failed_client))
+				},
 			}
 		}
 
@@ -242,8 +253,8 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 					*current_finalized_block = Some((block, RelayedBlockTransactions::default()));
 
 					// we have received new finalized block => go back to relay its transactions
-					continue;
-				}
+					continue
+				},
 				Err(err) => {
 					log::error!(
 						target: "bridge",
@@ -253,13 +264,13 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 						err,
 					);
 
-					return Err((err.is_connection_error(), FailedClient::Source));
-				}
+					return Err((err.is_connection_error(), FailedClient::Source))
+				},
 			}
 		}
 
 		// there are no any transactions we need to relay => wait for new data
-		return Ok(());
+		return Ok(())
 	}
 }
 
@@ -267,17 +278,16 @@ async fn run_loop_iteration<P: TransactionProofPipeline>(
 mod tests {
 	use super::*;
 	use crate::exchange::tests::{
-		test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof, TestTransactionsSource,
-		TestTransactionsTarget,
+		test_next_block, test_next_block_id, test_transaction_hash, TestTransactionProof,
+		TestTransactionsSource, TestTransactionsTarget,
 	};
 	use futures::{future::FutureExt, stream::StreamExt};
 
 	#[test]
 	fn exchange_loop_is_able_to_relay_proofs() {
-		let storage = InMemoryStorage {
-			best_processed_header_number: 0,
-		};
-		let target = TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed")));
+		let storage = InMemoryStorage { best_processed_header_number: 0 };
+		let target =
+			TestTransactionsTarget::new(Box::new(|_| unreachable!("no target ticks allowed")));
 		let target_data = target.data.clone();
 		let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded();
 
@@ -295,11 +305,8 @@ mod tests {
 				(true, false) => {
 					data.block = Ok(test_next_block());
 					target_data.lock().best_finalized_header_id = Ok(test_next_block_id());
-					target_data
-						.lock()
-						.transactions_to_accept
-						.insert(test_transaction_hash(1));
-				}
+					target_data.lock().transactions_to_accept.insert(test_transaction_hash(1));
+				},
 				_ => (),
 			}
 		}));
diff --git a/polkadot/bridges/relays/exchange/src/lib.rs b/polkadot/bridges/relays/exchange/src/lib.rs
index 370f085b4bf7c2edb164122ceb6a6aa22889981f..d167e5aa398ee1f4b4a638d21de5b7fbcb206e19 100644
--- a/polkadot/bridges/relays/exchange/src/lib.rs
+++ b/polkadot/bridges/relays/exchange/src/lib.rs
@@ -21,6 +21,7 @@
 
 #![warn(missing_docs)]
 
+pub mod error;
 pub mod exchange;
 pub mod exchange_loop;
 pub mod exchange_loop_metrics;
diff --git a/polkadot/bridges/relays/finality/src/finality_loop.rs b/polkadot/bridges/relays/finality/src/finality_loop.rs
index 3ea729d123e7d71621327f03f862269cb52de315..191d1838379312854af2bb30c9195bdf17caec90 100644
--- a/polkadot/bridges/relays/finality/src/finality_loop.rs
+++ b/polkadot/bridges/relays/finality/src/finality_loop.rs
@@ -43,18 +43,19 @@ pub struct FinalitySyncParams {
 	/// `min(source_block_time, target_block_time)`.
 	///
 	/// This parameter may be used to limit transactions rate. Increase the value && you'll get
-	/// infrequent updates => sparse headers => potential slow down of bridge applications, but pallet storage
-	/// won't be super large. Decrease the value to near `source_block_time` and you'll get
-	/// transaction for (almost) every block of the source chain => all source headers will be known
-	/// to the target chain => bridge applications will run faster, but pallet storage may explode
-	/// (but if pruning is there, then it's fine).
+	/// infrequent updates => sparse headers => potential slow down of bridge applications, but
+	/// pallet storage won't be super large. Decrease the value to near `source_block_time` and
+	/// you'll get transaction for (almost) every block of the source chain => all source headers
+	/// will be known to the target chain => bridge applications will run faster, but pallet
+	/// storage may explode (but if pruning is there, then it's fine).
 	pub tick: Duration,
-	/// Number of finality proofs to keep in internal buffer between loop wakeups.
+	/// Number of finality proofs to keep in internal buffer between loop iterations.
 	///
-	/// While in "major syncing" state, we still read finality proofs from the stream. They're stored
-	/// in the internal buffer between loop wakeups. When we're close to the tip of the chain, we may
-	/// meet finality delays if headers are not finalized frequently. So instead of waiting for next
-	/// finality proof to appear in the stream, we may use existing proof from that buffer.
+	/// While in "major syncing" state, we still read finality proofs from the stream. They're
+	/// stored in the internal buffer between loop iterations. When we're close to the tip of the
+	/// chain, we may meet finality delays if headers are not finalized frequently. So instead of
+	/// waiting for next finality proof to appear in the stream, we may use existing proof from
+	/// that buffer.
 	pub recent_finality_proofs_limit: usize,
 	/// Timeout before we treat our transactions as lost and restart the whole sync process.
 	pub stall_timeout: Duration,
@@ -89,10 +90,15 @@ pub trait TargetClient<P: FinalitySyncPipeline>: RelayClient {
 	async fn best_finalized_source_block_number(&self) -> Result<P::Number, Self::Error>;
 
 	/// Submit header finality proof.
-	async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), Self::Error>;
+	async fn submit_finality_proof(
+		&self,
+		header: P::Header,
+		proof: P::FinalityProof,
+	) -> Result<(), Self::Error>;
 }
 
-/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop.
+/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
+/// sync loop.
 pub fn metrics_prefix<P: FinalitySyncPipeline>() -> String {
 	format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME)
 }
@@ -104,12 +110,12 @@ pub async fn run<P: FinalitySyncPipeline>(
 	sync_params: FinalitySyncParams,
 	metrics_params: MetricsParams,
 	exit_signal: impl Future<Output = ()> + 'static + Send,
-) -> Result<(), String> {
+) -> Result<(), relay_utils::Error> {
 	let exit_signal = exit_signal.shared();
 	relay_utils::relay_loop(source_client, target_client)
 		.with_metrics(Some(metrics_prefix::<P>()), metrics_params)
-		.loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))?
-		.standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))?
+		.loop_metric(SyncLoopMetrics::new)?
+		.standalone_metric(GlobalMetrics::new)?
 		.expose()
 		.await?
 		.run(metrics_prefix::<P>(), move |source_client, target_client, metrics| {
@@ -127,15 +133,11 @@ pub async fn run<P: FinalitySyncPipeline>(
 /// Unjustified headers container. Ordered by header number.
 pub(crate) type UnjustifiedHeaders<H> = Vec<H>;
 /// Finality proofs container. Ordered by target header number.
-pub(crate) type FinalityProofs<P> = Vec<(
-	<P as FinalitySyncPipeline>::Number,
-	<P as FinalitySyncPipeline>::FinalityProof,
-)>;
+pub(crate) type FinalityProofs<P> =
+	Vec<(<P as FinalitySyncPipeline>::Number, <P as FinalitySyncPipeline>::FinalityProof)>;
 /// Reference to finality proofs container.
-pub(crate) type FinalityProofsRef<'a, P> = &'a [(
-	<P as FinalitySyncPipeline>::Number,
-	<P as FinalitySyncPipeline>::FinalityProof,
-)];
+pub(crate) type FinalityProofsRef<'a, P> =
+	&'a [(<P as FinalitySyncPipeline>::Number, <P as FinalitySyncPipeline>::FinalityProof)];
 
 /// Error that may happen inside finality synchronization loop.
 #[derive(Debug)]
@@ -186,10 +188,7 @@ pub(crate) struct RestartableFinalityProofsStream<S> {
 #[cfg(test)]
 impl<S> From<S> for RestartableFinalityProofsStream<S> {
 	fn from(stream: S) -> Self {
-		RestartableFinalityProofsStream {
-			needs_restart: false,
-			stream: Box::pin(stream),
-		}
+		RestartableFinalityProofsStream { needs_restart: false, stream: Box::pin(stream) }
 	}
 }
 
@@ -260,14 +259,12 @@ async fn run_until_connection_lost<P: FinalitySyncPipeline>(
 				last_transaction = updated_last_transaction;
 				retry_backoff.reset();
 				sync_params.tick
-			}
+			},
 			Err(error) => {
 				log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error);
 				error.fail_if_connection_error()?;
-				retry_backoff
-					.next_backoff()
-					.unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY)
-			}
+				retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY)
+			},
 		};
 		if finality_proofs_stream.needs_restart {
 			log::warn!(target: "bridge", "{} finality proofs stream is being restarted", P::SOURCE_NAME);
@@ -297,10 +294,8 @@ where
 	TC: TargetClient<P>,
 {
 	// read best source headers ids from source and target nodes
-	let best_number_at_source = source_client
-		.best_finalized_block_number()
-		.await
-		.map_err(Error::Source)?;
+	let best_number_at_source =
+		source_client.best_finalized_block_number().await.map_err(Error::Source)?;
 	let best_number_at_target = target_client
 		.best_finalized_source_block_number()
 		.await
@@ -309,7 +304,8 @@ where
 		metrics_sync.update_best_block_at_source(best_number_at_source);
 		metrics_sync.update_best_block_at_target(best_number_at_target);
 	}
-	*state.progress = print_sync_progress::<P>(*state.progress, best_number_at_source, best_number_at_target);
+	*state.progress =
+		print_sync_progress::<P>(*state.progress, best_number_at_source, best_number_at_target);
 
 	// if we have already submitted header, then we just need to wait for it
 	// if we're waiting too much, then we believe our transaction has been lost and restart sync
@@ -324,9 +320,9 @@ where
 				P::TARGET_NAME,
 			);
 
-			return Err(Error::Stalled);
+			return Err(Error::Stalled)
 		} else {
-			return Ok(Some(last_transaction));
+			return Ok(Some(last_transaction))
 		}
 	}
 
@@ -343,10 +339,8 @@ where
 	.await?
 	{
 		Some((header, justification)) => {
-			let new_transaction = Transaction {
-				time: Instant::now(),
-				submitted_header_number: header.number(),
-			};
+			let new_transaction =
+				Transaction { time: Instant::now(), submitted_header_number: header.number() };
 
 			log::debug!(
 				target: "bridge",
@@ -361,7 +355,7 @@ where
 				.await
 				.map_err(Error::Target)?;
 			Ok(Some(new_transaction))
-		}
+		},
 		None => Ok(None),
 	}
 }
@@ -398,15 +392,15 @@ where
 	)
 	.await?;
 	let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof {
-		SelectedFinalityProof::Mandatory(header, finality_proof) => return Ok(Some((header, finality_proof))),
+		SelectedFinalityProof::Mandatory(header, finality_proof) =>
+			return Ok(Some((header, finality_proof))),
 		_ if sync_params.only_mandatory_headers => {
 			// we are not reading finality proofs from the stream, so eventually it'll break
 			// but we don't care about transient proofs at all, so it is acceptable
-			return Ok(None);
-		}
-		SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => {
-			(unjustified_headers, Some((header, finality_proof)))
-		}
+			return Ok(None)
+		},
+		SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) =>
+			(unjustified_headers, Some((header, finality_proof))),
 		SelectedFinalityProof::None(unjustified_headers) => (unjustified_headers, None),
 	};
 
@@ -451,7 +445,11 @@ pub(crate) enum SelectedFinalityProof<Header, FinalityProof> {
 /// Otherwise, `SelectedFinalityProof::None` is returned.
 ///
 /// Unless we have found mandatory header, all missing headers are collected and returned.
-pub(crate) async fn read_missing_headers<P: FinalitySyncPipeline, SC: SourceClient<P>, TC: TargetClient<P>>(
+pub(crate) async fn read_missing_headers<
+	P: FinalitySyncPipeline,
+	SC: SourceClient<P>,
+	TC: TargetClient<P>,
+>(
 	source_client: &SC,
 	_target_client: &TC,
 	best_number_at_source: P::Number,
@@ -470,22 +468,30 @@ pub(crate) async fn read_missing_headers<P: FinalitySyncPipeline, SC: SourceClie
 		match (is_mandatory, finality_proof) {
 			(true, Some(finality_proof)) => {
 				log::trace!(target: "bridge", "Header {:?} is mandatory", header_number);
-				return Ok(SelectedFinalityProof::Mandatory(header, finality_proof));
-			}
+				return Ok(SelectedFinalityProof::Mandatory(header, finality_proof))
+			},
 			(true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())),
 			(false, Some(finality_proof)) => {
 				log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number);
 				unjustified_headers.clear();
 				selected_finality_proof = Some((header, finality_proof));
-			}
+			},
 			(false, None) => {
 				unjustified_headers.push(header);
-			}
+			},
 		}
 
 		header_number = header_number + One::one();
 	}
 
+	log::trace!(
+		target: "bridge",
+		"Read {} {} headers. Selected finality proof for header: {:?}",
+		best_number_at_source.saturating_sub(best_number_at_target),
+		P::SOURCE_NAME,
+		selected_finality_proof.as_ref().map(|(header, _)| header),
+	);
+
 	Ok(match selected_finality_proof {
 		Some((header, proof)) => SelectedFinalityProof::Regular(unjustified_headers, header, proof),
 		None => SelectedFinalityProof::None(unjustified_headers),
@@ -493,22 +499,46 @@ pub(crate) async fn read_missing_headers<P: FinalitySyncPipeline, SC: SourceClie
 }
 
 /// Read finality proofs from the stream.
-pub(crate) fn read_finality_proofs_from_stream<P: FinalitySyncPipeline, FPS: Stream<Item = P::FinalityProof>>(
+pub(crate) fn read_finality_proofs_from_stream<
+	P: FinalitySyncPipeline,
+	FPS: Stream<Item = P::FinalityProof>,
+>(
 	finality_proofs_stream: &mut RestartableFinalityProofsStream<FPS>,
 	recent_finality_proofs: &mut FinalityProofs<P>,
 ) {
+	let mut proofs_count = 0;
+	let mut first_header_number = None;
+	let mut last_header_number = None;
 	loop {
 		let next_proof = finality_proofs_stream.stream.next();
 		let finality_proof = match next_proof.now_or_never() {
 			Some(Some(finality_proof)) => finality_proof,
 			Some(None) => {
 				finality_proofs_stream.needs_restart = true;
-				break;
-			}
+				break
+			},
 			None => break,
 		};
 
-		recent_finality_proofs.push((finality_proof.target_header_number(), finality_proof));
+		let target_header_number = finality_proof.target_header_number();
+		if first_header_number.is_none() {
+			first_header_number = Some(target_header_number);
+		}
+		last_header_number = Some(target_header_number);
+		proofs_count += 1;
+
+		recent_finality_proofs.push((target_header_number, finality_proof));
+	}
+
+	if proofs_count != 0 {
+		log::trace!(
+			target: "bridge",
+			"Read {} finality proofs from {} finality stream for headers in range [{:?}; {:?}]",
+			proofs_count,
+			P::SOURCE_NAME,
+			first_header_number,
+			last_header_number,
+		);
 	}
 }
 
@@ -520,7 +550,13 @@ pub(crate) fn select_better_recent_finality_proof<P: FinalitySyncPipeline>(
 	selected_finality_proof: Option<(P::Header, P::FinalityProof)>,
 ) -> Option<(P::Header, P::FinalityProof)> {
 	if unjustified_headers.is_empty() || recent_finality_proofs.is_empty() {
-		return selected_finality_proof;
+		log::trace!(
+			target: "bridge",
+			"Can not improve selected {} finality proof {:?}. No unjustified headers and recent proofs",
+			P::SOURCE_NAME,
+			selected_finality_proof.as_ref().map(|(h, _)| h.number()),
+		);
+		return selected_finality_proof
 	}
 
 	const NOT_EMPTY_PROOF: &str = "we have checked that the vec is not empty; qed";
@@ -542,9 +578,24 @@ pub(crate) fn select_better_recent_finality_proof<P: FinalitySyncPipeline>(
 	let selected_finality_proof_index = recent_finality_proofs
 		.binary_search_by_key(intersection.end(), |(number, _)| *number)
 		.unwrap_or_else(|index| index.saturating_sub(1));
-	let (selected_header_number, finality_proof) = &recent_finality_proofs[selected_finality_proof_index];
-	if !intersection.contains(selected_header_number) {
-		return selected_finality_proof;
+	let (selected_header_number, finality_proof) =
+		&recent_finality_proofs[selected_finality_proof_index];
+	let has_selected_finality_proof = intersection.contains(selected_header_number);
+	log::trace!(
+		target: "bridge",
+		"Trying to improve selected {} finality proof {:?}. Headers range: [{:?}; {:?}]. Proofs range: [{:?}; {:?}].\
+		Trying to improve to: {:?}. Result: {}",
+		P::SOURCE_NAME,
+		selected_finality_proof.as_ref().map(|(h, _)| h.number()),
+		unjustified_range_begin,
+		unjustified_range_end,
+		buffered_range_begin,
+		buffered_range_end,
+		selected_header_number,
+		if has_selected_finality_proof { "improved" } else { "not improved" },
+	);
+	if !has_selected_finality_proof {
+		return selected_finality_proof
 	}
 
 	// now remove all obsolete headers and extract selected header
@@ -560,20 +611,15 @@ pub(crate) fn prune_recent_finality_proofs<P: FinalitySyncPipeline>(
 	recent_finality_proofs: &mut FinalityProofs<P>,
 	recent_finality_proofs_limit: usize,
 ) {
-	let position =
-		recent_finality_proofs.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number);
+	let position = recent_finality_proofs
+		.binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number);
 
 	// remove all obsolete elements
-	*recent_finality_proofs = recent_finality_proofs.split_off(
-		position
-			.map(|position| position + 1)
-			.unwrap_or_else(|position| position),
-	);
+	*recent_finality_proofs = recent_finality_proofs
+		.split_off(position.map(|position| position + 1).unwrap_or_else(|position| position));
 
 	// now - limit vec by size
-	let split_index = recent_finality_proofs
-		.len()
-		.saturating_sub(recent_finality_proofs_limit);
+	let split_index = recent_finality_proofs.len().saturating_sub(recent_finality_proofs_limit);
 	*recent_finality_proofs = recent_finality_proofs.split_off(split_index);
 }
 
@@ -585,15 +631,15 @@ fn print_sync_progress<P: FinalitySyncPipeline>(
 	let (prev_time, prev_best_number_at_target) = progress_context;
 	let now = Instant::now();
 
-	let need_update = now - prev_time > Duration::from_secs(10)
-		|| prev_best_number_at_target
+	let need_update = now - prev_time > Duration::from_secs(10) ||
+		prev_best_number_at_target
 			.map(|prev_best_number_at_target| {
 				best_number_at_target.saturating_sub(prev_best_number_at_target) > 10.into()
 			})
 			.unwrap_or(true);
 
 	if !need_update {
-		return (prev_time, prev_best_number_at_target);
+		return (prev_time, prev_best_number_at_target)
 	}
 
 	log::info!(
diff --git a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs
index d29c55cc4c3f81fd1de15510808e68eedd1584d0..e8f42593d1e36fcc76e5b851bf41f133c8f9118b 100644
--- a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs
+++ b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs
@@ -18,17 +18,21 @@
 
 #![cfg(test)]
 
-use crate::finality_loop::{
-	prune_recent_finality_proofs, read_finality_proofs_from_stream, run, select_better_recent_finality_proof,
-	select_header_to_submit, FinalityProofs, FinalitySyncParams, RestartableFinalityProofsStream, SourceClient,
-	TargetClient,
+use crate::{
+	finality_loop::{
+		prune_recent_finality_proofs, read_finality_proofs_from_stream, run,
+		select_better_recent_finality_proof, select_header_to_submit, FinalityProofs,
+		FinalitySyncParams, RestartableFinalityProofsStream, SourceClient, TargetClient,
+	},
+	FinalityProof, FinalitySyncPipeline, SourceHeader,
 };
-use crate::{FinalityProof, FinalitySyncPipeline, SourceHeader};
 
 use async_trait::async_trait;
 use futures::{FutureExt, Stream, StreamExt};
 use parking_lot::Mutex;
-use relay_utils::{metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError};
+use relay_utils::{
+	metrics::MetricsParams, relay_loop::Client as RelayClient, MaybeConnectionError,
+};
 use std::{collections::HashMap, pin::Pin, sync::Arc, time::Duration};
 
 type IsMandatory = bool;
@@ -121,10 +125,7 @@ impl SourceClient<TestFinalitySyncPipeline> for TestSourceClient {
 	) -> Result<(TestSourceHeader, Option<TestFinalityProof>), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(&mut *data);
-		data.source_headers
-			.get(&number)
-			.cloned()
-			.ok_or(TestError::NonConnection)
+		data.source_headers.get(&number).cloned().ok_or(TestError::NonConnection)
 	}
 
 	async fn finality_proofs(&self) -> Result<Self::FinalityProofsStream, TestError> {
@@ -157,7 +158,11 @@ impl TargetClient<TestFinalitySyncPipeline> for TestTargetClient {
 		Ok(data.target_best_block_number)
 	}
 
-	async fn submit_finality_proof(&self, header: TestSourceHeader, proof: TestFinalityProof) -> Result<(), TestError> {
+	async fn submit_finality_proof(
+		&self,
+		header: TestSourceHeader,
+		proof: TestFinalityProof,
+	) -> Result<(), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(&mut *data);
 		data.target_best_block_number = header.number();
@@ -171,11 +176,12 @@ fn prepare_test_clients(
 	state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static,
 	source_headers: HashMap<TestNumber, (TestSourceHeader, Option<TestFinalityProof>)>,
 ) -> (TestSourceClient, TestTargetClient) {
-	let internal_state_function: Arc<dyn Fn(&mut ClientsData) + Send + Sync> = Arc::new(move |data| {
-		if state_function(data) {
-			exit_sender.unbounded_send(()).unwrap();
-		}
-	});
+	let internal_state_function: Arc<dyn Fn(&mut ClientsData) + Send + Sync> =
+		Arc::new(move |data| {
+			if state_function(data) {
+				exit_sender.unbounded_send(()).unwrap();
+			}
+		});
 	let clients_data = Arc::new(Mutex::new(ClientsData {
 		source_best_block_number: 10,
 		source_headers,
@@ -189,14 +195,13 @@ fn prepare_test_clients(
 			on_method_call: internal_state_function.clone(),
 			data: clients_data.clone(),
 		},
-		TestTargetClient {
-			on_method_call: internal_state_function,
-			data: clients_data,
-		},
+		TestTargetClient { on_method_call: internal_state_function, data: clients_data },
 	)
 }
 
-fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static) -> ClientsData {
+fn run_sync_loop(
+	state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static,
+) -> ClientsData {
 	let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded();
 	let (source_client, target_client) = prepare_test_clients(
 		exit_sender,
@@ -234,12 +239,13 @@ fn run_sync_loop(state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync
 #[test]
 fn finality_sync_loop_works() {
 	let client_data = run_sync_loop(|data| {
-		// header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, because
-		// header#8 has persistent finality proof && it is mandatory => it is submitted
-		// header#9 has persistent finality proof, but it isn't mandatory => it is submitted, because
-		//   there are no more persistent finality proofs
+		// header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted,
+		// because header#8 has persistent finality proof && it is mandatory => it is submitted
+		// header#9 has persistent finality proof, but it isn't mandatory => it is submitted,
+		// because   there are no more persistent finality proofs
 		//
-		// once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from the stream
+		// once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from
+		// the stream
 		if data.target_best_block_number == 9 {
 			data.source_best_block_number = 14;
 			data.source_headers.insert(11, (TestSourceHeader(false, 11), None));
@@ -287,10 +293,7 @@ fn run_only_mandatory_headers_mode_test(
 		vec![
 			(6, (TestSourceHeader(false, 6), Some(TestFinalityProof(6)))),
 			(7, (TestSourceHeader(false, 7), Some(TestFinalityProof(7)))),
-			(
-				8,
-				(TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8))),
-			),
+			(8, (TestSourceHeader(has_mandatory_headers, 8), Some(TestFinalityProof(8)))),
 			(9, (TestSourceHeader(false, 9), Some(TestFinalityProof(9)))),
 			(10, (TestSourceHeader(false, 10), Some(TestFinalityProof(10)))),
 		]
@@ -357,7 +360,8 @@ fn select_better_recent_finality_proof_works() {
 		Some((TestSourceHeader(false, 2), TestFinalityProof(2))),
 	);
 
-	// if there's no intersection between recent finality proofs and unjustified headers, nothing is changed
+	// if there's no intersection between recent finality proofs and unjustified headers, nothing is
+	// changed
 	let mut unjustified_headers = vec![TestSourceHeader(false, 9), TestSourceHeader(false, 10)];
 	assert_eq!(
 		select_better_recent_finality_proof::<TestFinalitySyncPipeline>(
@@ -368,13 +372,10 @@ fn select_better_recent_finality_proof_works() {
 		Some((TestSourceHeader(false, 2), TestFinalityProof(2))),
 	);
 
-	// if there's intersection between recent finality proofs and unjustified headers, but there are no
-	// proofs in this intersection, nothing is changed
-	let mut unjustified_headers = vec![
-		TestSourceHeader(false, 8),
-		TestSourceHeader(false, 9),
-		TestSourceHeader(false, 10),
-	];
+	// if there's intersection between recent finality proofs and unjustified headers, but there are
+	// no proofs in this intersection, nothing is changed
+	let mut unjustified_headers =
+		vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)];
 	assert_eq!(
 		select_better_recent_finality_proof::<TestFinalitySyncPipeline>(
 			&[(7, TestFinalityProof(7)), (11, TestFinalityProof(11))],
@@ -385,22 +386,15 @@ fn select_better_recent_finality_proof_works() {
 	);
 	assert_eq!(
 		unjustified_headers,
-		vec![
-			TestSourceHeader(false, 8),
-			TestSourceHeader(false, 9),
-			TestSourceHeader(false, 10)
-		]
+		vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)]
 	);
 
 	// if there's intersection between recent finality proofs and unjustified headers and there's
 	// a proof in this intersection:
 	// - this better (last from intersection) proof is selected;
 	// - 'obsolete' unjustified headers are pruned.
-	let mut unjustified_headers = vec![
-		TestSourceHeader(false, 8),
-		TestSourceHeader(false, 9),
-		TestSourceHeader(false, 10),
-	];
+	let mut unjustified_headers =
+		vec![TestSourceHeader(false, 8), TestSourceHeader(false, 9), TestSourceHeader(false, 10)];
 	assert_eq!(
 		select_better_recent_finality_proof::<TestFinalitySyncPipeline>(
 			&[(7, TestFinalityProof(7)), (9, TestFinalityProof(9))],
@@ -416,7 +410,10 @@ fn read_finality_proofs_from_stream_works() {
 	// when stream is currently empty, nothing is changed
 	let mut recent_finality_proofs = vec![(1, TestFinalityProof(1))];
 	let mut stream = futures::stream::pending().into();
-	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(&mut stream, &mut recent_finality_proofs);
+	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(
+		&mut stream,
+		&mut recent_finality_proofs,
+	);
 	assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]);
 	assert!(!stream.needs_restart);
 
@@ -424,20 +421,20 @@ fn read_finality_proofs_from_stream_works() {
 	let mut stream = futures::stream::iter(vec![TestFinalityProof(4)])
 		.chain(futures::stream::pending())
 		.into();
-	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(&mut stream, &mut recent_finality_proofs);
-	assert_eq!(
-		recent_finality_proofs,
-		vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]
+	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(
+		&mut stream,
+		&mut recent_finality_proofs,
 	);
+	assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]);
 	assert!(!stream.needs_restart);
 
 	// when stream has ended, we'll need to restart it
 	let mut stream = futures::stream::empty().into();
-	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(&mut stream, &mut recent_finality_proofs);
-	assert_eq!(
-		recent_finality_proofs,
-		vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]
+	read_finality_proofs_from_stream::<TestFinalitySyncPipeline, _>(
+		&mut stream,
+		&mut recent_finality_proofs,
 	);
+	assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]);
 	assert!(stream.needs_restart);
 }
 
diff --git a/polkadot/bridges/relays/finality/src/lib.rs b/polkadot/bridges/relays/finality/src/lib.rs
index 64ec5bed05005ff4664660b27feb094bee157675..78ef33f1b376b8d2815f3361e02275ef4efb0bab 100644
--- a/polkadot/bridges/relays/finality/src/lib.rs
+++ b/polkadot/bridges/relays/finality/src/lib.rs
@@ -19,7 +19,9 @@
 //! are still submitted to the target node, but are treated as auxiliary data as we are not trying
 //! to submit all source headers to the target node.
 
-pub use crate::finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient};
+pub use crate::finality_loop::{
+	metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient,
+};
 
 use bp_header_chain::FinalityProof;
 use std::fmt::Debug;
diff --git a/polkadot/bridges/relays/headers/src/headers.rs b/polkadot/bridges/relays/headers/src/headers.rs
index 0b948d9da4cc6e0bc6ae89cb133dd410e513aec5..8d67c1cf48574edf17856e06341f9e507438c829 100644
--- a/polkadot/bridges/relays/headers/src/headers.rs
+++ b/polkadot/bridges/relays/headers/src/headers.rs
@@ -20,22 +20,33 @@
 //! may stay until source/target chain state isn't updated. When a header reaches the
 //! `ready` sub-queue, it may be submitted to the target chain.
 
-use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader};
+use crate::sync_types::{
+	HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SourceHeader,
+};
 
 use linked_hash_map::LinkedHashMap;
 use num_traits::{One, Zero};
 use relay_utils::HeaderId;
 use std::{
-	collections::{btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap, HashSet},
+	collections::{
+		btree_map::Entry as BTreeMapEntry, hash_map::Entry as HashMapEntry, BTreeMap, HashMap,
+		HashSet,
+	},
 	time::{Duration, Instant},
 };
 
-type HeadersQueue<P> =
-	BTreeMap<<P as HeadersSyncPipeline>::Number, HashMap<<P as HeadersSyncPipeline>::Hash, QueuedHeader<P>>>;
-type SyncedChildren<P> =
-	BTreeMap<<P as HeadersSyncPipeline>::Number, HashMap<<P as HeadersSyncPipeline>::Hash, HashSet<HeaderIdOf<P>>>>;
-type KnownHeaders<P> =
-	BTreeMap<<P as HeadersSyncPipeline>::Number, HashMap<<P as HeadersSyncPipeline>::Hash, HeaderStatus>>;
+type HeadersQueue<P> = BTreeMap<
+	<P as HeadersSyncPipeline>::Number,
+	HashMap<<P as HeadersSyncPipeline>::Hash, QueuedHeader<P>>,
+>;
+type SyncedChildren<P> = BTreeMap<
+	<P as HeadersSyncPipeline>::Number,
+	HashMap<<P as HeadersSyncPipeline>::Hash, HashSet<HeaderIdOf<P>>>,
+>;
+type KnownHeaders<P> = BTreeMap<
+	<P as HeadersSyncPipeline>::Number,
+	HashMap<<P as HeadersSyncPipeline>::Hash, HeaderStatus>,
+>;
 
 /// We're trying to fetch completion data for single header at this interval.
 const RETRY_FETCH_COMPLETION_INTERVAL: Duration = Duration::from_secs(20);
@@ -65,7 +76,7 @@ pub struct QueuedHeaders<P: HeadersSyncPipeline> {
 	/// Headers that are (we believe) currently submitted to target node by our,
 	/// not-yet mined transactions.
 	submitted: HeadersQueue<P>,
-	/// Synced headers childrens. We need it to support case when header is synced, but some of
+	/// Synced headers children. We need it to support case when header is synced, but some of
 	/// its parents are incomplete.
 	synced_children: SyncedChildren<P>,
 	/// Pointers to all headers that we ever seen and we believe we can touch in the future.
@@ -82,15 +93,6 @@ pub struct QueuedHeaders<P: HeadersSyncPipeline> {
 	prune_border: P::Number,
 }
 
-/// Header completion data.
-#[derive(Debug)]
-struct HeaderCompletion<Completion> {
-	/// Last time when we tried to upload completion data to target node, if ever.
-	pub last_upload_time: Option<Instant>,
-	/// Completion data.
-	pub completion: Completion,
-}
-
 impl<P: HeadersSyncPipeline> Default for QueuedHeaders<P> {
 	fn default() -> Self {
 		QueuedHeaders {
@@ -122,35 +124,31 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	pub fn headers_in_status(&self, status: HeaderStatus) -> usize {
 		match status {
 			HeaderStatus::Unknown | HeaderStatus::Synced => 0,
-			HeaderStatus::MaybeOrphan => self
-				.maybe_orphan
-				.values()
-				.fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Orphan => self.orphan.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::MaybeExtra => self
-				.maybe_extra
-				.values()
-				.fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Extra => self.extra.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Ready => self.ready.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Incomplete => self.incomplete.values().fold(0, |total, headers| total + headers.len()),
-			HeaderStatus::Submitted => self.submitted.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::MaybeOrphan =>
+				self.maybe_orphan.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Orphan =>
+				self.orphan.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::MaybeExtra =>
+				self.maybe_extra.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Extra =>
+				self.extra.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Ready =>
+				self.ready.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Incomplete =>
+				self.incomplete.values().fold(0, |total, headers| total + headers.len()),
+			HeaderStatus::Submitted =>
+				self.submitted.values().fold(0, |total, headers| total + headers.len()),
 		}
 	}
 
 	/// Returns number of headers that are currently in the queue.
 	pub fn total_headers(&self) -> usize {
-		self.maybe_orphan
-			.values()
-			.fold(0, |total, headers| total + headers.len())
-			+ self.orphan.values().fold(0, |total, headers| total + headers.len())
-			+ self
-				.maybe_extra
-				.values()
-				.fold(0, |total, headers| total + headers.len())
-			+ self.extra.values().fold(0, |total, headers| total + headers.len())
-			+ self.ready.values().fold(0, |total, headers| total + headers.len())
-			+ self.incomplete.values().fold(0, |total, headers| total + headers.len())
+		self.maybe_orphan.values().fold(0, |total, headers| total + headers.len()) +
+			self.orphan.values().fold(0, |total, headers| total + headers.len()) +
+			self.maybe_extra.values().fold(0, |total, headers| total + headers.len()) +
+			self.extra.values().fold(0, |total, headers| total + headers.len()) +
+			self.ready.values().fold(0, |total, headers| total + headers.len()) +
+			self.incomplete.values().fold(0, |total, headers| total + headers.len())
 	}
 
 	/// Returns number of best block in the queue.
@@ -166,8 +164,16 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 						std::cmp::max(
 							self.ready.keys().next_back().cloned().unwrap_or_else(Zero::zero),
 							std::cmp::max(
-								self.incomplete.keys().next_back().cloned().unwrap_or_else(Zero::zero),
-								self.submitted.keys().next_back().cloned().unwrap_or_else(Zero::zero),
+								self.incomplete
+									.keys()
+									.next_back()
+									.cloned()
+									.unwrap_or_else(Zero::zero),
+								self.submitted
+									.keys()
+									.next_back()
+									.cloned()
+									.unwrap_or_else(Zero::zero),
 							),
 						),
 					),
@@ -191,7 +197,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			.unwrap_or(HeaderStatus::Unknown)
 	}
 
-	/// Get oldest header from given queue.
+	/// Get the oldest header from given queue.
 	pub fn header(&self, status: HeaderStatus) -> Option<&QueuedHeader<P>> {
 		match status {
 			HeaderStatus::Unknown | HeaderStatus::Synced => None,
@@ -205,7 +211,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 		}
 	}
 
-	/// Get oldest headers from given queue until functor will return false.
+	/// Get the oldest headers from given queue until functor will return false.
 	pub fn headers(
 		&self,
 		status: HeaderStatus,
@@ -235,7 +241,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 				id,
 				status,
 			);
-			return;
+			return
 		}
 
 		if id.0 < self.prune_border {
@@ -245,7 +251,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 				P::SOURCE_NAME,
 				id,
 			);
-			return;
+			return
 		}
 
 		let parent_id = header.parent_id();
@@ -256,20 +262,20 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			HeaderStatus::Unknown | HeaderStatus::MaybeOrphan => {
 				insert_header(&mut self.maybe_orphan, id, header);
 				HeaderStatus::MaybeOrphan
-			}
+			},
 			HeaderStatus::Orphan => {
 				insert_header(&mut self.orphan, id, header);
 				HeaderStatus::Orphan
-			}
-			HeaderStatus::MaybeExtra
-			| HeaderStatus::Extra
-			| HeaderStatus::Ready
-			| HeaderStatus::Incomplete
-			| HeaderStatus::Submitted
-			| HeaderStatus::Synced => {
+			},
+			HeaderStatus::MaybeExtra |
+			HeaderStatus::Extra |
+			HeaderStatus::Ready |
+			HeaderStatus::Incomplete |
+			HeaderStatus::Submitted |
+			HeaderStatus::Synced => {
 				insert_header(&mut self.maybe_extra, id, header);
 				HeaderStatus::MaybeExtra
-			}
+			},
 		};
 
 		self.known_headers.entry(id.0).or_default().insert(id.1, status);
@@ -282,7 +288,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 		);
 	}
 
-	/// Receive best header from the target node.
+	/// Receive the best header from the target node.
 	pub fn target_best_header_response(&mut self, id: &HeaderIdOf<P>) {
 		self.header_synced(id)
 	}
@@ -297,7 +303,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 				HeaderStatus::Orphan,
 				id,
 			);
-			return;
+			return
 		}
 
 		move_header_descendants::<P>(
@@ -360,8 +366,8 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 					id,
 				);
 
-				return;
-			}
+				return
+			},
 		};
 
 		// do not remove from `incomplete_headers` here, because otherwise we'll miss
@@ -423,14 +429,20 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	}
 
 	/// Marks given headers incomplete.
-	pub fn add_incomplete_headers(&mut self, make_header_incomplete: bool, new_incomplete_headers: Vec<HeaderIdOf<P>>) {
+	pub fn add_incomplete_headers(
+		&mut self,
+		make_header_incomplete: bool,
+		new_incomplete_headers: Vec<HeaderIdOf<P>>,
+	) {
 		for new_incomplete_header in new_incomplete_headers {
 			if make_header_incomplete {
 				self.header_synced(&new_incomplete_header);
 			}
 
-			let move_origins = select_synced_children::<P>(&self.synced_children, &new_incomplete_header);
-			let move_origins = move_origins.into_iter().chain(std::iter::once(new_incomplete_header));
+			let move_origins =
+				select_synced_children::<P>(&self.synced_children, &new_incomplete_header);
+			let move_origins =
+				move_origins.into_iter().chain(std::iter::once(new_incomplete_header));
 			for move_origin in move_origins {
 				move_header_descendants::<P>(
 					&mut [&mut self.ready, &mut self.submitted],
@@ -453,13 +465,15 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 		}
 	}
 
-	/// When incomplete headers ids are receved from target node.
+	/// When incomplete headers ids are received from target node.
 	pub fn incomplete_headers_response(&mut self, ids: HashSet<HeaderIdOf<P>>) {
 		// all new incomplete headers are marked Synced and all their descendants
 		// are moved from Ready/Submitted to Incomplete queue
 		let new_incomplete_headers = ids
 			.iter()
-			.filter(|id| !self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id))
+			.filter(|id| {
+				!self.incomplete_headers.contains_key(id) && !self.completion_data.contains_key(id)
+			})
 			.cloned()
 			.collect::<Vec<_>>();
 		self.add_incomplete_headers(true, new_incomplete_headers);
@@ -477,8 +491,10 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			// sub2eth rejects H if H.Parent is incomplete
 			// sub2sub allows 'syncing' headers like that
 			// => let's check if there are some synced children of just completed header
-			let move_origins = select_synced_children::<P>(&self.synced_children, &just_completed_header);
-			let move_origins = move_origins.into_iter().chain(std::iter::once(just_completed_header));
+			let move_origins =
+				select_synced_children::<P>(&self.synced_children, &just_completed_header);
+			let move_origins =
+				move_origins.into_iter().chain(std::iter::once(just_completed_header));
 			for move_origin in move_origins {
 				move_header_descendants::<P>(
 					&mut [&mut self.incomplete],
@@ -509,7 +525,8 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	pub fn incomplete_header(&mut self) -> Option<HeaderIdOf<P>> {
 		queued_incomplete_header(&mut self.incomplete_headers, |last_fetch_time| {
 			let retry = match *last_fetch_time {
-				Some(last_fetch_time) => last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL,
+				Some(last_fetch_time) =>
+					last_fetch_time.elapsed() > RETRY_FETCH_COMPLETION_INTERVAL,
 				None => true,
 			};
 
@@ -530,7 +547,7 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 	/// Prune and never accept headers before this block.
 	pub fn prune(&mut self, prune_border: P::Number) {
 		if prune_border <= self.prune_border {
-			return;
+			return
 		}
 
 		prune_queue(&mut self.maybe_orphan, prune_border);
@@ -579,10 +596,10 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 		match header {
 			Some(header) => {
 				let parent_id = header.header().parent_id();
-				self.incomplete_headers.contains_key(&parent_id)
-					|| self.completion_data.contains_key(&parent_id)
-					|| self.status(&parent_id) == HeaderStatus::Incomplete
-			}
+				self.incomplete_headers.contains_key(&parent_id) ||
+					self.completion_data.contains_key(&parent_id) ||
+					self.status(&parent_id) == HeaderStatus::Incomplete
+			},
 			None => false,
 		}
 	}
@@ -612,12 +629,8 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 			.expect("header has a given status; given queue has the header; qed");
 
 			// remember ids of all the children of the current header
-			let synced_children_entry = self
-				.synced_children
-				.entry(current.0)
-				.or_default()
-				.entry(current.1)
-				.or_default();
+			let synced_children_entry =
+				self.synced_children.entry(current.0).or_default().entry(current.1).or_default();
 			let all_queues = [
 				&self.maybe_orphan,
 				&self.orphan,
@@ -633,7 +646,9 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 					.map(|potential_children| {
 						potential_children
 							.values()
-							.filter(|potential_child| potential_child.header().parent_id() == current)
+							.filter(|potential_child| {
+								potential_child.header().parent_id() == current
+							})
 							.map(|child| child.id())
 							.collect::<Vec<_>>()
 					})
@@ -670,12 +685,19 @@ impl<P: HeadersSyncPipeline> QueuedHeaders<P> {
 }
 
 /// Insert header to the queue.
-fn insert_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: HeaderIdOf<P>, header: QueuedHeader<P>) {
+fn insert_header<P: HeadersSyncPipeline>(
+	queue: &mut HeadersQueue<P>,
+	id: HeaderIdOf<P>,
+	header: QueuedHeader<P>,
+) {
 	queue.entry(id.0).or_default().insert(id.1, header);
 }
 
 /// Remove header from the queue.
-fn remove_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: &HeaderIdOf<P>) -> Option<QueuedHeader<P>> {
+fn remove_header<P: HeadersSyncPipeline>(
+	queue: &mut HeadersQueue<P>,
+	id: &HeaderIdOf<P>,
+) -> Option<QueuedHeader<P>> {
 	let mut headers_at = match queue.entry(id.0) {
 		BTreeMapEntry::Occupied(headers_at) => headers_at,
 		BTreeMapEntry::Vacant(_) => return None,
@@ -689,7 +711,10 @@ fn remove_header<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, id: &Heade
 }
 
 /// Get header from the queue.
-fn header<'a, P: HeadersSyncPipeline>(queue: &'a HeadersQueue<P>, id: &HeaderIdOf<P>) -> Option<&'a QueuedHeader<P>> {
+fn header<'a, P: HeadersSyncPipeline>(
+	queue: &'a HeadersQueue<P>,
+	id: &HeaderIdOf<P>,
+) -> Option<&'a QueuedHeader<P>> {
 	queue.get(&id.0).and_then(|by_hash| by_hash.get(&id.1))
 }
 
@@ -808,11 +833,7 @@ fn oldest_headers<P: HeadersSyncPipeline>(
 	queue: &HeadersQueue<P>,
 	mut f: impl FnMut(&QueuedHeader<P>) -> bool,
 ) -> Option<Vec<&QueuedHeader<P>>> {
-	let result = queue
-		.values()
-		.flat_map(|h| h.values())
-		.take_while(|h| f(h))
-		.collect::<Vec<_>>();
+	let result = queue.values().flat_map(|h| h.values()).take_while(|h| f(h)).collect::<Vec<_>>();
 	if result.is_empty() {
 		None
 	} else {
@@ -826,7 +847,10 @@ fn prune_queue<P: HeadersSyncPipeline>(queue: &mut HeadersQueue<P>, prune_border
 }
 
 /// Forget all known headers with number less than given.
-fn prune_known_headers<P: HeadersSyncPipeline>(known_headers: &mut KnownHeaders<P>, prune_border: P::Number) {
+fn prune_known_headers<P: HeadersSyncPipeline>(
+	known_headers: &mut KnownHeaders<P>,
+	prune_border: P::Number,
+) {
 	let new_known_headers = known_headers.split_off(&prune_border);
 	for (pruned_number, pruned_headers) in &*known_headers {
 		for pruned_hash in pruned_headers.keys() {
@@ -857,8 +881,8 @@ fn queued_incomplete_header<Id: Clone + Eq + std::hash::Hash, T>(
 	map: &mut LinkedHashMap<Id, T>,
 	filter: impl FnMut(&mut T) -> bool,
 ) -> Option<(Id, &T)> {
-	// TODO (#84): headers that have been just appended to the end of the queue would have to wait until
-	// all previous headers will be retried
+	// TODO (#84): headers that have been just appended to the end of the queue would have to wait
+	// until all previous headers will be retried
 
 	let retry_old_header = map
 		.front()
@@ -866,9 +890,10 @@ fn queued_incomplete_header<Id: Clone + Eq + std::hash::Hash, T>(
 		.and_then(|key| map.get_mut(&key).map(filter))
 		.unwrap_or(false);
 	if retry_old_header {
-		let (header_key, header) = map.pop_front().expect("we have checked that front() exists; qed");
+		let (header_key, header) =
+			map.pop_front().expect("we have checked that front() exists; qed");
 		map.insert(header_key, header);
-		return map.back().map(|(id, data)| (id.clone(), data));
+		return map.back().map(|(id, data)| (id.clone(), data))
 	}
 
 	None
@@ -877,15 +902,15 @@ fn queued_incomplete_header<Id: Clone + Eq + std::hash::Hash, T>(
 #[cfg(test)]
 pub(crate) mod tests {
 	use super::*;
-	use crate::sync_loop_tests::{TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber};
-	use crate::sync_types::QueuedHeader;
+	use crate::{
+		sync_loop_tests::{
+			TestHash, TestHeader, TestHeaderId, TestHeadersSyncPipeline, TestNumber,
+		},
+		sync_types::QueuedHeader,
+	};
 
 	pub(crate) fn header(number: TestNumber) -> QueuedHeader<TestHeadersSyncPipeline> {
-		QueuedHeader::new(TestHeader {
-			number,
-			hash: hash(number),
-			parent_hash: hash(number - 1),
-		})
+		QueuedHeader::new(TestHeader { number, hash: hash(number), parent_hash: hash(number - 1) })
 	}
 
 	pub(crate) fn hash(number: TestNumber) -> TestHash {
@@ -900,34 +925,41 @@ pub(crate) mod tests {
 	fn total_headers_works() {
 		// total headers just sums up number of headers in every queue
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(1),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(2),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(2).or_default().insert(
-			hash(3),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.orphan.entry(3).or_default().insert(
-			hash(4),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_extra.entry(4).or_default().insert(
-			hash(5),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.ready.entry(5).or_default().insert(
-			hash(6),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.incomplete.entry(6).or_default().insert(
-			hash(7),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(1), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(2), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(2)
+			.or_default()
+			.insert(hash(3), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.orphan
+			.entry(3)
+			.or_default()
+			.insert(hash(4), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_extra
+			.entry(4)
+			.or_default()
+			.insert(hash(5), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.ready
+			.entry(5)
+			.or_default()
+			.insert(hash(6), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.incomplete
+			.entry(6)
+			.or_default()
+			.insert(hash(7), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.total_headers(), 7);
 	}
 
@@ -935,48 +967,56 @@ pub(crate) mod tests {
 	fn best_queued_number_works() {
 		// initially there are headers in MaybeOrphan queue only
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(1),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(1).or_default().insert(
-			hash(2),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
-		queue.maybe_orphan.entry(3).or_default().insert(
-			hash(3),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(1), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(1)
+			.or_default()
+			.insert(hash(2), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
+		queue
+			.maybe_orphan
+			.entry(3)
+			.or_default()
+			.insert(hash(3), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 3);
 		// and then there's better header in Orphan
-		queue.orphan.entry(10).or_default().insert(
-			hash(10),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.orphan
+			.entry(10)
+			.or_default()
+			.insert(hash(10), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 10);
 		// and then there's better header in MaybeExtra
-		queue.maybe_extra.entry(20).or_default().insert(
-			hash(20),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_extra
+			.entry(20)
+			.or_default()
+			.insert(hash(20), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 20);
 		// and then there's better header in Ready
-		queue.ready.entry(30).or_default().insert(
-			hash(30),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.ready
+			.entry(30)
+			.or_default()
+			.insert(hash(30), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 30);
 		// and then there's better header in MaybeOrphan again
-		queue.maybe_orphan.entry(40).or_default().insert(
-			hash(40),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.maybe_orphan
+			.entry(40)
+			.or_default()
+			.insert(hash(40), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 40);
 		// and then there's some header in Incomplete
-		queue.incomplete.entry(50).or_default().insert(
-			hash(50),
-			QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()),
-		);
+		queue
+			.incomplete
+			.entry(50)
+			.or_default()
+			.insert(hash(50), QueuedHeader::<TestHeadersSyncPipeline>::new(Default::default()));
 		assert_eq!(queue.best_queued_number(), 50);
 	}
 
@@ -986,11 +1026,7 @@ pub(crate) mod tests {
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
 		assert_eq!(queue.status(&id(10)), HeaderStatus::Unknown);
 		// and status is read from the KnownHeaders
-		queue
-			.known_headers
-			.entry(10)
-			.or_default()
-			.insert(hash(10), HeaderStatus::Ready);
+		queue.known_headers.entry(10).or_default().insert(hash(10), HeaderStatus::Ready);
 		assert_eq!(queue.status(&id(10)), HeaderStatus::Ready);
 	}
 
@@ -999,22 +1035,13 @@ pub(crate) mod tests {
 		// initially we have oldest header #10
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
 		queue.maybe_orphan.entry(10).or_default().insert(hash(1), header(100));
-		assert_eq!(
-			queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash,
-			hash(100)
-		);
+		assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(100));
 		// inserting #20 changes nothing
 		queue.maybe_orphan.entry(20).or_default().insert(hash(1), header(101));
-		assert_eq!(
-			queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash,
-			hash(100)
-		);
+		assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(100));
 		// inserting #5 makes it oldest
 		queue.maybe_orphan.entry(5).or_default().insert(hash(1), header(102));
-		assert_eq!(
-			queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash,
-			hash(102)
-		);
+		assert_eq!(queue.header(HeaderStatus::MaybeOrphan).unwrap().header().hash, hash(102));
 	}
 
 	#[test]
@@ -1100,11 +1127,7 @@ pub(crate) mod tests {
 			.entry(100)
 			.or_default()
 			.insert(hash(100), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(100)
-			.or_default()
-			.insert(hash(100), header(100));
+		queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100));
 		queue
 			.known_headers
 			.entry(99)
@@ -1117,17 +1140,9 @@ pub(crate) mod tests {
 			.or_default()
 			.insert(hash(98), HeaderStatus::MaybeExtra);
 		queue.maybe_extra.entry(98).or_default().insert(hash(98), header(98));
-		queue
-			.known_headers
-			.entry(97)
-			.or_default()
-			.insert(hash(97), HeaderStatus::Extra);
+		queue.known_headers.entry(97).or_default().insert(hash(97), HeaderStatus::Extra);
 		queue.extra.entry(97).or_default().insert(hash(97), header(97));
-		queue
-			.known_headers
-			.entry(96)
-			.or_default()
-			.insert(hash(96), HeaderStatus::Ready);
+		queue.known_headers.entry(96).or_default().insert(hash(96), HeaderStatus::Ready);
 		queue.ready.entry(96).or_default().insert(hash(96), header(96));
 		queue.target_best_header_response(&id(100));
 
@@ -1146,31 +1161,19 @@ pub(crate) mod tests {
 		// children of synced headers are stored
 		assert_eq!(
 			vec![id(97)],
-			queue.synced_children[&96][&hash(96)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&96][&hash(96)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(
 			vec![id(98)],
-			queue.synced_children[&97][&hash(97)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&97][&hash(97)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(
 			vec![id(99)],
-			queue.synced_children[&98][&hash(98)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&98][&hash(98)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(
 			vec![id(100)],
-			queue.synced_children[&99][&hash(99)]
-				.iter()
-				.cloned()
-				.collect::<Vec<_>>()
+			queue.synced_children[&99][&hash(99)].iter().cloned().collect::<Vec<_>>()
 		);
 		assert_eq!(0, queue.synced_children[&100][&hash(100)].len());
 	}
@@ -1194,11 +1197,7 @@ pub(crate) mod tests {
 			.entry(102)
 			.or_default()
 			.insert(hash(102), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(102)
-			.or_default()
-			.insert(hash(102), header(102));
+		queue.maybe_orphan.entry(102).or_default().insert(hash(102), header(102));
 		queue
 			.known_headers
 			.entry(103)
@@ -1230,11 +1229,7 @@ pub(crate) mod tests {
 			.entry(100)
 			.or_default()
 			.insert(hash(100), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(100)
-			.or_default()
-			.insert(hash(100), header(100));
+		queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100));
 		queue
 			.known_headers
 			.entry(101)
@@ -1246,11 +1241,7 @@ pub(crate) mod tests {
 			.entry(102)
 			.or_default()
 			.insert(hash(102), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(102)
-			.or_default()
-			.insert(hash(102), header(102));
+		queue.maybe_orphan.entry(102).or_default().insert(hash(102), header(102));
 		queue.maybe_orphan_response(&id(99), true);
 
 		// then all headers (#100..#103) are moved to the MaybeExtra queue
@@ -1275,21 +1266,13 @@ pub(crate) mod tests {
 			.entry(100)
 			.or_default()
 			.insert(hash(100), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(100)
-			.or_default()
-			.insert(hash(100), header(100));
+		queue.maybe_orphan.entry(100).or_default().insert(hash(100), header(100));
 		queue
 			.known_headers
 			.entry(101)
 			.or_default()
 			.insert(hash(101), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(101)
-			.or_default()
-			.insert(hash(101), header(101));
+		queue.maybe_orphan.entry(101).or_default().insert(hash(101), header(101));
 		queue.maybe_orphan_response(&id(99), false);
 
 		// then all headers (#100..#101) are moved to the Orphan queue
@@ -1404,7 +1387,9 @@ pub(crate) mod tests {
 		queue.incomplete_headers.clear();
 		queue.incomplete_headers.insert(
 			id(100),
-			Some(Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL),
+			Some(
+				Instant::now() - RETRY_FETCH_COMPLETION_INTERVAL - RETRY_FETCH_COMPLETION_INTERVAL,
+			),
 		);
 		assert_eq!(queue.incomplete_header(), Some(id(100)));
 	}
@@ -1560,11 +1545,7 @@ pub(crate) mod tests {
 			.entry(104)
 			.or_default()
 			.insert(hash(104), HeaderStatus::MaybeOrphan);
-		queue
-			.maybe_orphan
-			.entry(104)
-			.or_default()
-			.insert(hash(104), header(104));
+		queue.maybe_orphan.entry(104).or_default().insert(hash(104), header(104));
 		queue
 			.known_headers
 			.entry(103)
@@ -1633,7 +1614,8 @@ pub(crate) mod tests {
 	fn incomplete_headers_are_still_incomplete_after_advance() {
 		let mut queue = QueuedHeaders::<TestHeadersSyncPipeline>::default();
 
-		// relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete queue
+		// relay#1 knows that header#100 is incomplete && it has headers 101..104 in incomplete
+		// queue
 		queue.incomplete_headers.insert(id(100), None);
 		queue.incomplete.entry(101).or_default().insert(hash(101), header(101));
 		queue.incomplete.entry(102).or_default().insert(hash(102), header(102));
@@ -1665,8 +1647,8 @@ pub(crate) mod tests {
 			.or_default()
 			.insert(hash(104), HeaderStatus::Incomplete);
 
-		// let's say relay#2 completes header#100 and then submits header#101+header#102 and it turns
-		// out that header#102 is also incomplete
+		// let's say relay#2 completes header#100 and then submits header#101+header#102 and it
+		// turns out that header#102 is also incomplete
 		queue.incomplete_headers_response(vec![id(102)].into_iter().collect());
 
 		// then the header#103 and the header#104 must have Incomplete status
diff --git a/polkadot/bridges/relays/headers/src/sync.rs b/polkadot/bridges/relays/headers/src/sync.rs
index e992b1f8e583c14a109a240da8c3c7dc676d6192..012b63f0dc59c355bc06b9e01833f61d8f9e85a4 100644
--- a/polkadot/bridges/relays/headers/src/sync.rs
+++ b/polkadot/bridges/relays/headers/src/sync.rs
@@ -19,8 +19,10 @@
 //! to submit to the target chain? The context makes decisions basing on parameters
 //! passed using `HeadersSyncParams` structure.
 
-use crate::headers::QueuedHeaders;
-use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader};
+use crate::{
+	headers::QueuedHeaders,
+	sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader},
+};
 use num_traits::{One, Saturating, Zero};
 
 /// Common sync params.
@@ -35,7 +37,7 @@ pub struct HeadersSyncParams {
 	/// Maximal total headers size in single submit request.
 	pub max_headers_size_in_single_submit: usize,
 	/// We only may store and accept (from Ethereum node) headers that have
-	/// number >= than best_substrate_header.number - prune_depth.
+	/// number >= "best_substrate_header.number" - "prune_depth".
 	pub prune_depth: u32,
 	/// Target transactions mode.
 	pub target_tx_mode: TargetTransactionMode,
@@ -58,9 +60,9 @@ pub enum TargetTransactionMode {
 pub struct HeadersSync<P: HeadersSyncPipeline> {
 	/// Synchronization parameters.
 	params: HeadersSyncParams,
-	/// Best header number known to source node.
+	/// The best header number known to source node.
 	source_best_number: Option<P::Number>,
-	/// Best header known to target node.
+	/// The best header known to target node.
 	target_best_header: Option<HeaderIdOf<P>>,
 	/// Headers queue.
 	headers: QueuedHeaders<P>,
@@ -85,7 +87,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		self.source_best_number
 	}
 
-	/// Best header known to target node.
+	/// The best header known to target node.
 	pub fn target_best_header(&self) -> Option<HeaderIdOf<P>> {
 		self.target_best_header
 	}
@@ -121,36 +123,37 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		// if we haven't received best header from source node yet, there's nothing we can download
 		let source_best_number = self.source_best_number?;
 
-		// if we haven't received known best header from target node yet, there's nothing we can download
+		// if we haven't received known best header from target node yet, there's nothing we can
+		// download
 		let target_best_header = self.target_best_header.as_ref()?;
 
 		// if there's too many headers in the queue, stop downloading
 		let in_memory_headers = self.headers.total_headers();
 		if in_memory_headers >= self.params.max_future_headers_to_download {
-			return None;
+			return None
 		}
 
 		// if queue is empty and best header on target is > than best header on source,
-		// then we shoud reorg
+		// then we should reorganize
 		let best_queued_number = self.headers.best_queued_number();
 		if best_queued_number.is_zero() && source_best_number < target_best_header.0 {
-			return Some(source_best_number);
+			return Some(source_best_number)
 		}
 
-		// we assume that there were no reorgs if we have already downloaded best header
+		// we assume that there were no reorganizations if we have already downloaded best header
 		let best_downloaded_number = std::cmp::max(
 			std::cmp::max(best_queued_number, self.headers.best_synced_number()),
 			target_best_header.0,
 		);
 		if best_downloaded_number >= source_best_number {
-			return None;
+			return None
 		}
 
 		// download new header
 		Some(best_downloaded_number + One::one())
 	}
 
-	/// Selech orphan header to downoload.
+	/// Select orphan header to download.
 	pub fn select_orphan_header_to_download(&self) -> Option<&QueuedHeader<P>> {
 		let orphan_header = self.headers.header(HeaderStatus::Orphan)?;
 
@@ -159,7 +162,7 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		// => let's avoid fetching duplicate headers
 		let parent_id = orphan_header.parent_id();
 		if self.headers.status(&parent_id) != HeaderStatus::Unknown {
-			return None;
+			return None
 		}
 
 		Some(orphan_header)
@@ -169,12 +172,12 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 	pub fn select_headers_to_submit(&self, stalled: bool) -> Option<Vec<&QueuedHeader<P>>> {
 		// maybe we have paused new headers submit?
 		if self.pause_submit {
-			return None;
+			return None
 		}
 
 		// if we operate in backup mode, we only submit headers when sync has stalled
 		if self.params.target_tx_mode == TargetTransactionMode::Backup && !stalled {
-			return None;
+			return None
 		}
 
 		let headers_in_submit_status = self.headers.headers_in_status(HeaderStatus::Submitted);
@@ -187,15 +190,17 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 		let mut total_headers = 0;
 		self.headers.headers(HeaderStatus::Ready, |header| {
 			if total_headers == headers_to_submit_count {
-				return false;
+				return false
 			}
 			if total_headers == self.params.max_headers_in_single_submit {
-				return false;
+				return false
 			}
 
 			let encoded_size = P::estimate_size(header);
-			if total_headers != 0 && total_size + encoded_size > self.params.max_headers_size_in_single_submit {
-				return false;
+			if total_headers != 0 &&
+				total_size + encoded_size > self.params.max_headers_size_in_single_submit
+			{
+				return false
 			}
 
 			total_size += encoded_size;
@@ -228,15 +233,14 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 
 		// early return if it is still the same
 		if self.target_best_header == Some(best_header) {
-			return false;
+			return false
 		}
 
 		// remember that this header is now known to the Substrate runtime
 		self.headers.target_best_header_response(&best_header);
 
 		// prune ancient headers
-		self.headers
-			.prune(best_header.0.saturating_sub(self.params.prune_depth.into()));
+		self.headers.prune(best_header.0.saturating_sub(self.params.prune_depth.into()));
 
 		// finally remember the best header itself
 		self.target_best_header = Some(best_header);
@@ -281,9 +285,11 @@ impl<P: HeadersSyncPipeline> HeadersSync<P> {
 #[cfg(test)]
 pub mod tests {
 	use super::*;
-	use crate::headers::tests::{header, id};
-	use crate::sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber};
-	use crate::sync_types::HeaderStatus;
+	use crate::{
+		headers::tests::{header, id},
+		sync_loop_tests::{TestHash, TestHeadersSyncPipeline, TestNumber},
+		sync_types::HeaderStatus,
+	};
 	use relay_utils::HeaderId;
 
 	fn side_hash(number: TestNumber) -> TestHash {
diff --git a/polkadot/bridges/relays/headers/src/sync_loop.rs b/polkadot/bridges/relays/headers/src/sync_loop.rs
index b204932056509a4891788a1cc284f30ebd3a9b8d..da8d23dc39dec1b89241b65f7f596690bb985955 100644
--- a/polkadot/bridges/relays/headers/src/sync_loop.rs
+++ b/polkadot/bridges/relays/headers/src/sync_loop.rs
@@ -16,9 +16,11 @@
 
 //! Entrypoint for running headers synchronization loop.
 
-use crate::sync::{HeadersSync, HeadersSyncParams};
-use crate::sync_loop_metrics::SyncLoopMetrics;
-use crate::sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders};
+use crate::{
+	sync::{HeadersSync, HeadersSyncParams},
+	sync_loop_metrics::SyncLoopMetrics,
+	sync_types::{HeaderIdOf, HeaderStatus, HeadersSyncPipeline, QueuedHeader, SubmittedHeaders},
+};
 
 use async_trait::async_trait;
 use futures::{future::FutureExt, stream::StreamExt};
@@ -38,13 +40,13 @@ use std::{
 
 /// When we submit headers to target node, but see no updates of best
 /// source block known to target node during STALL_SYNC_TIMEOUT seconds,
-/// we consider that our headers are rejected because there has been reorg in target chain.
-/// This reorg could invalidate our knowledge about sync process (i.e. we have asked if
-/// HeaderA is known to target, but then reorg happened and the answer is different
+/// we consider that our headers are rejected because there has been reorganization in target chain.
+/// This reorganization could invalidate our knowledge about sync process (i.e. we have asked if
+/// HeaderA is known to target, but then reorganization happened and the answer is different
 /// now) => we need to reset sync.
 /// The other option is to receive **EVERY** best target header and check if it is
 /// direct child of previous best header. But: (1) subscription doesn't guarantee that
-/// the subscriber will receive every best header (2) reorg won't always lead to sync
+/// the subscriber will receive every best header (2) reorganization won't always lead to sync
 /// stall and restart is a heavy operation (we forget all in-memory headers).
 const STALL_SYNC_TIMEOUT: Duration = Duration::from_secs(5 * 60);
 /// Delay after we have seen update of best source header at target node,
@@ -66,8 +68,10 @@ pub trait SourceClient<P: HeadersSyncPipeline>: RelayClient {
 	async fn header_by_number(&self, number: P::Number) -> Result<P::Header, Self::Error>;
 
 	/// Get completion data by header hash.
-	async fn header_completion(&self, id: HeaderIdOf<P>)
-		-> Result<(HeaderIdOf<P>, Option<P::Completion>), Self::Error>;
+	async fn header_completion(
+		&self,
+		id: HeaderIdOf<P>,
+	) -> Result<(HeaderIdOf<P>, Option<P::Completion>), Self::Error>;
 
 	/// Get extra data by header hash.
 	async fn header_extra(
@@ -80,24 +84,36 @@ pub trait SourceClient<P: HeadersSyncPipeline>: RelayClient {
 /// Target client trait.
 #[async_trait]
 pub trait TargetClient<P: HeadersSyncPipeline>: RelayClient {
-	/// Returns ID of best header known to the target node.
+	/// Returns ID of the best header known to the target node.
 	async fn best_header_id(&self) -> Result<HeaderIdOf<P>, Self::Error>;
 
 	/// Returns true if header is known to the target node.
-	async fn is_known_header(&self, id: HeaderIdOf<P>) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
+	async fn is_known_header(
+		&self,
+		id: HeaderIdOf<P>,
+	) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
 
 	/// Submit headers.
-	async fn submit_headers(&self, headers: Vec<QueuedHeader<P>>) -> SubmittedHeaders<HeaderIdOf<P>, Self::Error>;
+	async fn submit_headers(
+		&self,
+		headers: Vec<QueuedHeader<P>>,
+	) -> SubmittedHeaders<HeaderIdOf<P>, Self::Error>;
 
 	/// Returns ID of headers that require to be 'completed' before children can be submitted.
 	async fn incomplete_headers_ids(&self) -> Result<HashSet<HeaderIdOf<P>>, Self::Error>;
 
 	/// Submit completion data for header.
-	async fn complete_header(&self, id: HeaderIdOf<P>, completion: P::Completion)
-		-> Result<HeaderIdOf<P>, Self::Error>;
+	async fn complete_header(
+		&self,
+		id: HeaderIdOf<P>,
+		completion: P::Completion,
+	) -> Result<HeaderIdOf<P>, Self::Error>;
 
 	/// Returns true if header requires extra data to be submitted.
-	async fn requires_extra(&self, header: QueuedHeader<P>) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
+	async fn requires_extra(
+		&self,
+		header: QueuedHeader<P>,
+	) -> Result<(HeaderIdOf<P>, bool), Self::Error>;
 }
 
 /// Synchronization maintain procedure.
@@ -110,7 +126,8 @@ pub trait SyncMaintain<P: HeadersSyncPipeline>: 'static + Clone + Send + Sync {
 
 impl<P: HeadersSyncPipeline> SyncMaintain<P> for () {}
 
-/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop.
+/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
+/// sync loop.
 pub fn metrics_prefix<P: HeadersSyncPipeline>() -> String {
 	format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME)
 }
@@ -126,12 +143,12 @@ pub async fn run<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 	sync_params: HeadersSyncParams,
 	metrics_params: MetricsParams,
 	exit_signal: impl Future<Output = ()> + 'static + Send,
-) -> Result<(), String> {
+) -> Result<(), relay_utils::Error> {
 	let exit_signal = exit_signal.shared();
 	relay_utils::relay_loop(source_client, target_client)
 		.with_metrics(Some(metrics_prefix::<P>()), metrics_params)
-		.loop_metric(|registry, prefix| SyncLoopMetrics::new(registry, prefix))?
-		.standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))?
+		.loop_metric(SyncLoopMetrics::new)?
+		.standalone_metric(GlobalMetrics::new)?
 		.expose()
 		.await?
 		.run(metrics_prefix::<P>(), move |source_client, target_client, metrics| {
@@ -480,7 +497,8 @@ async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 					id,
 				);
 
-				target_complete_header_future.set(target_client.complete_header(id, completion.clone()).fuse());
+				target_complete_header_future
+					.set(target_client.complete_header(id, completion.clone()).fuse());
 			} else if let Some(header) = sync.headers().header(HeaderStatus::MaybeExtra) {
 				log::debug!(
 					target: "bridge",
@@ -501,8 +519,8 @@ async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 				);
 
 				target_existence_status_future.set(target_client.is_known_header(parent_id).fuse());
-			} else if let Some(headers) =
-				sync.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT)
+			} else if let Some(headers) = sync
+				.select_headers_to_submit(last_update_time.elapsed() > BACKUP_STALL_SYNC_TIMEOUT)
 			{
 				log::debug!(
 					target: "bridge",
@@ -580,7 +598,7 @@ async fn run_until_connection_lost<P: HeadersSyncPipeline, TC: TargetClient<P>>(
 						P::SOURCE_NAME,
 						P::TARGET_NAME,
 					);
-					return Ok(());
+					return Ok(())
 				}
 
 				log::debug!(
@@ -616,15 +634,14 @@ fn print_sync_progress<P: HeadersSyncPipeline>(
 	let now_time = Instant::now();
 	let (now_best_header, now_target_header) = eth_sync.status();
 
-	let need_update = now_time - prev_time > Duration::from_secs(10)
-		|| match (prev_best_header, now_best_header) {
-			(Some(prev_best_header), Some(now_best_header)) => {
-				now_best_header.0.saturating_sub(prev_best_header) > 10.into()
-			}
+	let need_update = now_time - prev_time > Duration::from_secs(10) ||
+		match (prev_best_header, now_best_header) {
+			(Some(prev_best_header), Some(now_best_header)) =>
+				now_best_header.0.saturating_sub(prev_best_header) > 10.into(),
 			_ => false,
 		};
 	if !need_update {
-		return (prev_time, prev_best_header, prev_target_header);
+		return (prev_time, prev_best_header, prev_target_header)
 	}
 
 	log::info!(
diff --git a/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs b/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs
index 37dae1134042890420f43fc19dc8d7ca016e58b2..1c558c25de9d5e06a10af707201c80316c41c8c2 100644
--- a/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs
+++ b/polkadot/bridges/relays/headers/src/sync_loop_metrics.rs
@@ -16,8 +16,10 @@
 
 //! Metrics for headers synchronization relay loop.
 
-use crate::sync::HeadersSync;
-use crate::sync_types::{HeaderStatus, HeadersSyncPipeline};
+use crate::{
+	sync::HeadersSync,
+	sync_types::{HeaderStatus, HeadersSyncPipeline},
+};
 
 use num_traits::Zero;
 use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64};
@@ -78,7 +80,8 @@ impl SyncLoopMetrics {
 	pub fn update<P: HeadersSyncPipeline>(&self, sync: &HeadersSync<P>) {
 		let headers = sync.headers();
 		let source_best_number = sync.source_best_number().unwrap_or_else(Zero::zero);
-		let target_best_number = sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero);
+		let target_best_number =
+			sync.target_best_header().map(|id| id.0).unwrap_or_else(Zero::zero);
 
 		self.update_best_block_at_source(source_best_number);
 		self.update_best_block_at_target(target_best_number);
diff --git a/polkadot/bridges/relays/headers/src/sync_loop_tests.rs b/polkadot/bridges/relays/headers/src/sync_loop_tests.rs
index 11f15778873b9dc56ab7f2a0740c6cc3646d7963..57629d7b4ac937dcb8967d42474be7bf47e0107f 100644
--- a/polkadot/bridges/relays/headers/src/sync_loop_tests.rs
+++ b/polkadot/bridges/relays/headers/src/sync_loop_tests.rs
@@ -16,16 +16,18 @@
 
 #![cfg(test)]
 
-use crate::sync_loop::{run, SourceClient, TargetClient};
-use crate::sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders};
+use crate::{
+	sync_loop::{run, SourceClient, TargetClient},
+	sync_types::{HeadersSyncPipeline, QueuedHeader, SourceHeader, SubmittedHeaders},
+};
 
 use async_trait::async_trait;
 use backoff::backoff::Backoff;
 use futures::{future::FutureExt, stream::StreamExt};
 use parking_lot::Mutex;
 use relay_utils::{
-	metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, retry_backoff, HeaderId,
-	MaybeConnectionError,
+	metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient,
+	retry_backoff, HeaderId, MaybeConnectionError,
 };
 use std::{
 	collections::{HashMap, HashSet},
@@ -166,7 +168,10 @@ impl SourceClient<TestHeadersSyncPipeline> for Source {
 		data.header_by_number.get(&number).cloned().ok_or(TestError(false))
 	}
 
-	async fn header_completion(&self, id: TestHeaderId) -> Result<(TestHeaderId, Option<TestCompletion>), TestError> {
+	async fn header_completion(
+		&self,
+		id: TestHeaderId,
+	) -> Result<(TestHeaderId, Option<TestCompletion>), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(SourceMethod::HeaderCompletion(id), &mut *data);
 		if data.provides_completion {
@@ -264,7 +269,10 @@ impl TargetClient<TestHeadersSyncPipeline> for Target {
 			.unwrap_or(Ok((id, false)))
 	}
 
-	async fn submit_headers(&self, headers: Vec<TestQueuedHeader>) -> SubmittedHeaders<TestHeaderId, TestError> {
+	async fn submit_headers(
+		&self,
+		headers: Vec<TestQueuedHeader>,
+	) -> SubmittedHeaders<TestHeaderId, TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(TargetMethod::SubmitHeaders(headers.clone()), &mut *data);
 		data.submitted_headers
@@ -287,14 +295,21 @@ impl TargetClient<TestHeadersSyncPipeline> for Target {
 		}
 	}
 
-	async fn complete_header(&self, id: TestHeaderId, completion: TestCompletion) -> Result<TestHeaderId, TestError> {
+	async fn complete_header(
+		&self,
+		id: TestHeaderId,
+		completion: TestCompletion,
+	) -> Result<TestHeaderId, TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(TargetMethod::CompleteHeader(id, completion), &mut *data);
 		data.completed_headers.insert(id.1, completion);
 		Ok(id)
 	}
 
-	async fn requires_extra(&self, header: TestQueuedHeader) -> Result<(TestHeaderId, bool), TestError> {
+	async fn requires_extra(
+		&self,
+		header: TestQueuedHeader,
+	) -> Result<(TestHeaderId, bool), TestError> {
 		let mut data = self.data.lock();
 		(self.on_method_call)(TargetMethod::RequiresExtra(header.clone()), &mut *data);
 		if data.requires_extra {
@@ -321,11 +336,7 @@ fn test_header(number: TestNumber) -> TestHeader {
 	TestHeader {
 		hash: id.1,
 		number: id.0,
-		parent_hash: if number == 0 {
-			TestHash::default()
-		} else {
-			test_id(number - 1).1
-		},
+		parent_hash: if number == 0 { TestHash::default() } else { test_id(number - 1).1 },
 	}
 }
 
@@ -467,18 +478,15 @@ fn run_sync_loop_test(params: SyncLoopTestParams) {
 	let target_requires_extra = params.target_requires_extra;
 	let target_requires_completion = params.target_requires_completion;
 	let stop_at = params.stop_at;
-	let source = Source::new(
-		params.best_source_header.id(),
-		params.headers_on_source,
-		move |method, _| {
+	let source =
+		Source::new(params.best_source_header.id(), params.headers_on_source, move |method, _| {
 			if !target_requires_extra {
 				source_reject_extra(&method);
 			}
 			if !target_requires_completion {
 				source_reject_completion(&method);
 			}
-		},
-	);
+		});
 	let target = Target::new(
 		params.best_target_header.id(),
 		params.headers_on_target.into_iter().map(|header| header.id()).collect(),
diff --git a/polkadot/bridges/relays/headers/src/sync_types.rs b/polkadot/bridges/relays/headers/src/sync_types.rs
index 5809ebab59e1c47b1b9e8a02c690ee3523bc3521..8d93e8bf49fbea15e8c1305b2d0ce3a7a7bbe092 100644
--- a/polkadot/bridges/relays/headers/src/sync_types.rs
+++ b/polkadot/bridges/relays/headers/src/sync_types.rs
@@ -50,7 +50,14 @@ pub trait HeadersSyncPipeline: 'static + Clone + Send + Sync {
 	const TARGET_NAME: &'static str;
 
 	/// Headers we're syncing are identified by this hash.
-	type Hash: Eq + Clone + Copy + Send + Sync + std::fmt::Debug + std::fmt::Display + std::hash::Hash;
+	type Hash: Eq
+		+ Clone
+		+ Copy
+		+ Send
+		+ Sync
+		+ std::fmt::Debug
+		+ std::fmt::Display
+		+ std::hash::Hash;
 	/// Headers we're syncing are identified by this number.
 	type Number: relay_utils::BlockNumberBase;
 	/// Type of header that we're syncing.
@@ -77,7 +84,8 @@ pub trait HeadersSyncPipeline: 'static + Clone + Send + Sync {
 }
 
 /// A HeaderId for `HeaderSyncPipeline`.
-pub type HeaderIdOf<P> = HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>::Number>;
+pub type HeaderIdOf<P> =
+	HeaderId<<P as HeadersSyncPipeline>::Hash, <P as HeadersSyncPipeline>::Number>;
 
 /// Header that we're receiving from source node.
 pub trait SourceHeader<Hash, Number>: Clone + std::fmt::Debug + PartialEq + Send + Sync {
@@ -153,13 +161,13 @@ impl<P: HeadersSyncPipeline> QueuedHeader<P> {
 pub struct SubmittedHeaders<Id, Error> {
 	/// IDs of headers that have been submitted to target node.
 	pub submitted: Vec<Id>,
-	/// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted` vec),
-	/// but all descendants are not.
+	/// IDs of incomplete headers. These headers were submitted (so this id is also in `submitted`
+	/// vec), but all descendants are not.
 	pub incomplete: Vec<Id>,
-	/// IDs of ignored headers that we have decided not to submit (they're either rejected by
-	/// target node immediately, or they're descendants of incomplete headers).
+	/// IDs of ignored headers that we have decided not to submit (they are either rejected by
+	/// target node immediately, or they're descendants of incomplete headers).
 	pub rejected: Vec<Id>,
-	/// Fatal target node error, if it has occured during submission.
+	/// Fatal target node error, if it has occurred during submission.
 	pub fatal_error: Option<Error>,
 }
 
@@ -180,10 +188,6 @@ impl<Id: std::fmt::Debug, Error> std::fmt::Display for SubmittedHeaders<Id, Erro
 		let incomplete = format_ids(self.incomplete.iter());
 		let rejected = format_ids(self.rejected.iter());
 
-		write!(
-			f,
-			"Submitted: {}, Incomplete: {}, Rejected: {}",
-			submitted, incomplete, rejected
-		)
+		write!(f, "Submitted: {}, Incomplete: {}, Rejected: {}", submitted, incomplete, rejected)
 	}
 }
diff --git a/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml b/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..5bee10856daa3c64adf32fef6cb62107bcd6ce3b
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml
@@ -0,0 +1,48 @@
+[package]
+name = "substrate-relay-helper"
+version = "0.1.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+
+[dependencies]
+anyhow = "1.0"
+thiserror = "1.0.26"
+async-std = "1.9.0"
+async-trait = "0.1.42"
+codec = { package = "parity-scale-codec", version = "2.2.0" }
+futures = "0.3.12"
+num-traits = "0.2"
+log = "0.4.14"
+
+
+# Bridge dependencies
+
+bp-header-chain = { path = "../../primitives/header-chain" }
+bridge-runtime-common = { path = "../../bin/runtime-common" }
+
+finality-grandpa = { version = "0.14.0" }
+finality-relay = { path = "../finality" }
+relay-utils = { path = "../utils" }
+messages-relay = { path = "../messages" }
+relay-substrate-client = { path = "../client-substrate" }
+
+pallet-bridge-messages = { path = "../../modules/messages" }
+
+bp-runtime = { path = "../../primitives/runtime" }
+bp-messages = { path = "../../primitives/messages" }
+
+# Substrate Dependencies
+
+frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" }
+sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" }
+
+[dev-dependencies]
+bp-millau = { path = "../../primitives/chain-millau" }
+bp-rococo = { path = "../../primitives/chain-rococo" }
+bp-wococo = { path = "../../primitives/chain-wococo" }
+relay-rococo-client = { path = "../client-rococo" }
+relay-wococo-client = { path = "../client-wococo" }
+rialto-runtime = { path = "../../bin/rialto/runtime" }
diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs b/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs
new file mode 100644
index 0000000000000000000000000000000000000000..93458457d34c9dc4213aa71817d8cb6f73ef6a76
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs
@@ -0,0 +1,243 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Tools for updating conversion rate that is stored in the runtime storage.
+
+use relay_utils::metrics::F64SharedRef;
+use std::{future::Future, time::Duration};
+
+/// Duration between updater iterations.
+const SLEEP_DURATION: Duration = Duration::from_secs(60);
+
+/// Update-conversion-rate transaction status.
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum TransactionStatus {
+	/// We have not submitted any transaction recently.
+	Idle,
+	/// We have recently submitted transaction that should update conversion rate.
+	Submitted(f64),
+}
+
+/// Run infinite conversion rate updater loop.
+///
+/// The loop is maintaining the Left -> Right conversion rate, used as `RightTokens = LeftTokens *
+/// Rate`.
+pub fn run_conversion_rate_update_loop<
+	SubmitConversionRateFuture: Future<Output = anyhow::Result<()>> + Send + 'static,
+>(
+	left_to_right_stored_conversion_rate: F64SharedRef,
+	left_to_base_conversion_rate: F64SharedRef,
+	right_to_base_conversion_rate: F64SharedRef,
+	max_difference_ratio: f64,
+	submit_conversion_rate: impl Fn(f64) -> SubmitConversionRateFuture + Send + 'static,
+) {
+	async_std::task::spawn(async move {
+		let mut transaction_status = TransactionStatus::Idle;
+		loop {
+			async_std::task::sleep(SLEEP_DURATION).await;
+			let maybe_new_conversion_rate = maybe_select_new_conversion_rate(
+				&mut transaction_status,
+				&left_to_right_stored_conversion_rate,
+				&left_to_base_conversion_rate,
+				&right_to_base_conversion_rate,
+				max_difference_ratio,
+			)
+			.await;
+			if let Some((prev_conversion_rate, new_conversion_rate)) = maybe_new_conversion_rate {
+				let submit_conversion_rate_future = submit_conversion_rate(new_conversion_rate);
+				match submit_conversion_rate_future.await {
+					Ok(()) => {
+						transaction_status = TransactionStatus::Submitted(prev_conversion_rate);
+					},
+					Err(error) => {
+						log::trace!(target: "bridge", "Failed to submit conversion rate update transaction: {:?}", error);
+					},
+				}
+			}
+		}
+	});
+}
+
+/// Select new conversion rate to submit to the node.
+async fn maybe_select_new_conversion_rate(
+	transaction_status: &mut TransactionStatus,
+	left_to_right_stored_conversion_rate: &F64SharedRef,
+	left_to_base_conversion_rate: &F64SharedRef,
+	right_to_base_conversion_rate: &F64SharedRef,
+	max_difference_ratio: f64,
+) -> Option<(f64, f64)> {
+	let left_to_right_stored_conversion_rate =
+		(*left_to_right_stored_conversion_rate.read().await)?;
+	match *transaction_status {
+		TransactionStatus::Idle => (),
+		TransactionStatus::Submitted(previous_left_to_right_stored_conversion_rate) => {
+			// we can't compare float values from different sources directly, so we only care
+			// whether the stored rate has been changed or not. If it has been changed, then we
+			// assume that our proposal has been accepted.
+			//
+			// float comparison is ok here, because we compare same-origin (stored in runtime
+			// storage) values and if they are different, it means that the value has actually been
+			// updated
+			#[allow(clippy::float_cmp)]
+			if previous_left_to_right_stored_conversion_rate == left_to_right_stored_conversion_rate
+			{
+				// the rate has not been changed => we won't submit any transactions until it is
+				// accepted, or the rate is changed by someone else
+				return None
+			}
+
+			*transaction_status = TransactionStatus::Idle;
+		},
+	}
+
+	let left_to_base_conversion_rate = (*left_to_base_conversion_rate.read().await)?;
+	let right_to_base_conversion_rate = (*right_to_base_conversion_rate.read().await)?;
+	let actual_left_to_right_conversion_rate =
+		right_to_base_conversion_rate / left_to_base_conversion_rate;
+
+	let rate_difference =
+		(actual_left_to_right_conversion_rate - left_to_right_stored_conversion_rate).abs();
+	let rate_difference_ratio = rate_difference / left_to_right_stored_conversion_rate;
+	if rate_difference_ratio < max_difference_ratio {
+		return None
+	}
+
+	Some((left_to_right_stored_conversion_rate, actual_left_to_right_conversion_rate))
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use async_std::sync::{Arc, RwLock};
+
+	fn test_maybe_select_new_conversion_rate(
+		mut transaction_status: TransactionStatus,
+		stored_conversion_rate: Option<f64>,
+		left_to_base_conversion_rate: Option<f64>,
+		right_to_base_conversion_rate: Option<f64>,
+		max_difference_ratio: f64,
+	) -> (Option<(f64, f64)>, TransactionStatus) {
+		let stored_conversion_rate = Arc::new(RwLock::new(stored_conversion_rate));
+		let left_to_base_conversion_rate = Arc::new(RwLock::new(left_to_base_conversion_rate));
+		let right_to_base_conversion_rate = Arc::new(RwLock::new(right_to_base_conversion_rate));
+		let result = async_std::task::block_on(maybe_select_new_conversion_rate(
+			&mut transaction_status,
+			&stored_conversion_rate,
+			&left_to_base_conversion_rate,
+			&right_to_base_conversion_rate,
+			max_difference_ratio,
+		));
+		(result, transaction_status)
+	}
+
+	#[test]
+	fn rate_is_not_updated_when_transaction_is_submitted() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Submitted(10.0),
+				Some(10.0),
+				Some(1.0),
+				Some(1.0),
+				0.0
+			),
+			(None, TransactionStatus::Submitted(10.0)),
+		);
+	}
+
+	#[test]
+	fn transaction_state_is_changed_to_idle_when_stored_rate_shanges() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Submitted(1.0),
+				Some(10.0),
+				Some(1.0),
+				Some(1.0),
+				100.0
+			),
+			(None, TransactionStatus::Idle),
+		);
+	}
+
+	#[test]
+	fn transaction_is_not_submitted_when_left_to_base_rate_is_unknown() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(10.0),
+				None,
+				Some(1.0),
+				0.0
+			),
+			(None, TransactionStatus::Idle),
+		);
+	}
+
+	#[test]
+	fn transaction_is_not_submitted_when_right_to_base_rate_is_unknown() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(10.0),
+				Some(1.0),
+				None,
+				0.0
+			),
+			(None, TransactionStatus::Idle),
+		);
+	}
+
+	#[test]
+	fn transaction_is_not_submitted_when_stored_rate_is_unknown() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				None,
+				Some(1.0),
+				Some(1.0),
+				0.0
+			),
+			(None, TransactionStatus::Idle),
+		);
+	}
+
+	#[test]
+	fn transaction_is_not_submitted_when_difference_is_below_threshold() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(1.0),
+				Some(1.0),
+				Some(1.01),
+				0.02
+			),
+			(None, TransactionStatus::Idle),
+		);
+	}
+
+	#[test]
+	fn transaction_is_submitted_when_difference_is_above_threshold() {
+		assert_eq!(
+			test_maybe_select_new_conversion_rate(
+				TransactionStatus::Idle,
+				Some(1.0),
+				Some(1.0),
+				Some(1.03),
+				0.02
+			),
+			(Some((1.0, 1.03)), TransactionStatus::Idle),
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/error.rs b/polkadot/bridges/relays/lib-substrate-relay/src/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..802499503563dcbee09a6fa058520880bdc8d918
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/error.rs
@@ -0,0 +1,58 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Relay errors.
+
+use relay_substrate_client as client;
+use sp_finality_grandpa::AuthorityList;
+use sp_runtime::traits::MaybeDisplay;
+use std::fmt::Debug;
+use thiserror::Error;
+
+/// Relay errors.
+#[derive(Error, Debug)]
+pub enum Error<Hash: Debug + MaybeDisplay, HeaderNumber: Debug + MaybeDisplay> {
+	/// Failed to submit a signed extrinsic to the target chain.
+	#[error("Failed to submit {0} transaction: {1:?}")]
+	SubmitTransaction(&'static str, client::Error),
+	/// Failed to subscribe to the justification stream of the source chain.
+	#[error("Failed to subscribe to {0} justifications: {1:?}")]
+	Subscribe(&'static str, client::Error),
+	/// Failed to read a justification from the source chain (client error).
+	#[error("Failed to read {0} justification from the stream: {1}")]
+	ReadJustification(&'static str, client::Error),
+	/// Failed to read a justification from the source chain (stream has ended).
+	#[error("Failed to read {0} justification from the stream: stream has ended unexpectedly")]
+	ReadJustificationStreamEnded(&'static str),
+	/// Failed to decode a justification received from the source chain.
+	#[error("Failed to decode {0} justification: {1:?}")]
+	DecodeJustification(&'static str, codec::Error),
+	/// GRANDPA authorities read from the source chain are invalid.
+	#[error("Read invalid {0} authorities set: {1:?}")]
+	ReadInvalidAuthorities(&'static str, AuthorityList),
+	/// Failed to guess initial GRANDPA authorities at the given header of the source chain.
+	#[error("Failed to guess initial {0} GRANDPA authorities set id: checked all possible ids in range [0; {1}]")]
+	GuessInitialAuthorities(&'static str, HeaderNumber),
+	/// Failed to retrieve GRANDPA authorities at the given header from the source chain.
+	#[error("Failed to retrieve {0} GRANDPA authorities set at header {1}: {2:?}")]
+	RetrieveAuthorities(&'static str, Hash, client::Error),
+	/// Failed to decode GRANDPA authorities at the given header of the source chain.
+	#[error("Failed to decode {0} GRANDPA authorities set at header {1}: {2:?}")]
+	DecodeAuthorities(&'static str, Hash, codec::Error),
+	/// Failed to retrieve header by the hash from the source chain.
+	#[error("Failed to retrieve {0} header with hash {1}: {2:?}")]
+	RetrieveHeader(&'static str, Hash, client::Error),
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
similarity index 72%
rename from polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs
rename to polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
index 19fa0917df3904448aa014c0918cd1cb71bb9088..cdfbb3354d27412853992e9fd0a9e7c3d66cc88d 100644
--- a/polkadot/bridges/relays/bin-substrate/src/finality_pipeline.rs
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs
@@ -16,17 +16,18 @@
 
 //! Substrate-to-Substrate headers sync entrypoint.
 
-use crate::finality_target::SubstrateFinalityTarget;
+use crate::{finality_target::SubstrateFinalityTarget, STALL_TIMEOUT};
 
 use bp_header_chain::justification::GrandpaJustification;
+use bp_runtime::AccountIdOf;
 use finality_relay::{FinalitySyncParams, FinalitySyncPipeline};
-use relay_substrate_client::{finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader};
+use relay_substrate_client::{
+	finality_source::FinalitySource, BlockNumberOf, Chain, Client, HashOf, SyncHeader,
+};
 use relay_utils::{metrics::MetricsParams, BlockNumberBase};
 use sp_core::Bytes;
-use std::{fmt::Debug, marker::PhantomData, time::Duration};
+use std::{fmt::Debug, marker::PhantomData};
 
-/// Default synchronization loop timeout.
-pub(crate) const STALL_TIMEOUT: Duration = Duration::from_secs(120);
 /// Default limit of recent finality proofs.
 ///
 /// Finality delay of 4096 blocks is unlikely to happen in practice in
@@ -34,7 +35,10 @@ pub(crate) const STALL_TIMEOUT: Duration = Duration::from_secs(120);
 pub(crate) const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096;
 
 /// Headers sync pipeline for Substrate <-> Substrate relays.
-pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline {
+pub trait SubstrateFinalitySyncPipeline: 'static + Clone + Debug + Send + Sync {
+	/// Pipeline for syncing finalized Source chain headers to Target chain.
+	type FinalitySyncPipeline: FinalitySyncPipeline;
+
 	/// Name of the runtime method that returns id of best finalized source header at target chain.
 	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str;
 
@@ -49,19 +53,20 @@ pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline {
 	/// Start finality relay guards.
 	///
 	/// Different finality bridges may have different set of guards - e.g. on ephemeral chains we
-	/// don't need version guards, on test chains we don't care that much about relayer account
+	/// don't need a version guard, on test chains we don't care that much about relayer account
 	/// balance, ... So the implementation is left to the specific bridges.
 	fn start_relay_guards(&self) {}
 
 	/// Returns id of account that we're using to sign transactions at target chain.
-	fn transactions_author(&self) -> <Self::TargetChain as Chain>::AccountId;
+	fn transactions_author(&self) -> AccountIdOf<Self::TargetChain>;
 
 	/// Make submit header transaction.
 	fn make_submit_finality_proof_transaction(
 		&self,
-		transaction_nonce: <Self::TargetChain as Chain>::Index,
-		header: Self::Header,
-		proof: Self::FinalityProof,
+		era: bp_runtime::TransactionEraOf<Self::TargetChain>,
+		transaction_nonce: bp_runtime::IndexOf<Self::TargetChain>,
+		header: <Self::FinalitySyncPipeline as FinalitySyncPipeline>::Header,
+		proof: <Self::FinalitySyncPipeline as FinalitySyncPipeline>::FinalityProof,
 	) -> Bytes;
 }
 
@@ -69,9 +74,9 @@ pub trait SubstrateFinalitySyncPipeline: FinalitySyncPipeline {
 #[derive(Clone)]
 pub struct SubstrateFinalityToSubstrate<SourceChain, TargetChain: Chain, TargetSign> {
 	/// Client for the target chain.
-	pub(crate) target_client: Client<TargetChain>,
+	pub target_client: Client<TargetChain>,
 	/// Data required to sign target chain transactions.
-	pub(crate) target_sign: TargetSign,
+	pub target_sign: TargetSign,
 	/// Unused generic arguments dump.
 	_marker: PhantomData<SourceChain>,
 }
@@ -86,14 +91,12 @@ impl<SourceChain, TargetChain: Chain, TargetSign> Debug
 	}
 }
 
-impl<SourceChain, TargetChain: Chain, TargetSign> SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign> {
+impl<SourceChain, TargetChain: Chain, TargetSign>
+	SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>
+{
 	/// Create new Substrate-to-Substrate headers pipeline.
 	pub fn new(target_client: Client<TargetChain>, target_sign: TargetSign) -> Self {
-		SubstrateFinalityToSubstrate {
-			target_client,
-			target_sign,
-			_marker: Default::default(),
-		}
+		SubstrateFinalityToSubstrate { target_client, target_sign, _marker: Default::default() }
 	}
 }
 
@@ -120,15 +123,16 @@ pub async fn run<SourceChain, TargetChain, P>(
 	source_client: Client<SourceChain>,
 	target_client: Client<TargetChain>,
 	only_mandatory_headers: bool,
+	transactions_mortality: Option<u32>,
 	metrics_params: MetricsParams,
 ) -> anyhow::Result<()>
 where
-	P: SubstrateFinalitySyncPipeline<
+	P: SubstrateFinalitySyncPipeline<TargetChain = TargetChain>,
+	P::FinalitySyncPipeline: FinalitySyncPipeline<
 		Hash = HashOf<SourceChain>,
 		Number = BlockNumberOf<SourceChain>,
 		Header = SyncHeader<SourceChain::Header>,
 		FinalityProof = GrandpaJustification<SourceChain::Header>,
-		TargetChain = TargetChain,
 	>,
 	SourceChain: Clone + Chain,
 	BlockNumberOf<SourceChain>: BlockNumberBase,
@@ -143,11 +147,18 @@ where
 
 	finality_relay::run(
 		FinalitySource::new(source_client, None),
-		SubstrateFinalityTarget::new(target_client, pipeline),
+		SubstrateFinalityTarget::new(target_client, pipeline, transactions_mortality),
 		FinalitySyncParams {
-			tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL),
+			tick: std::cmp::max(
+				SourceChain::AVERAGE_BLOCK_INTERVAL,
+				TargetChain::AVERAGE_BLOCK_INTERVAL,
+			),
 			recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT,
-			stall_timeout: STALL_TIMEOUT,
+			stall_timeout: relay_substrate_client::transaction_stall_timeout(
+				transactions_mortality,
+				TargetChain::AVERAGE_BLOCK_INTERVAL,
+				STALL_TIMEOUT,
+			),
 			only_mandatory_headers,
 		},
 		metrics_params,
diff --git a/polkadot/bridges/relays/bin-substrate/src/finality_target.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs
similarity index 58%
rename from polkadot/bridges/relays/bin-substrate/src/finality_target.rs
rename to polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs
index ffa10cabacbfa53822841fbf419deefa78a895e4..f50bd103f4300c0e78024b6cd50c81f5f83233cf 100644
--- a/polkadot/bridges/relays/bin-substrate/src/finality_target.rs
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs
@@ -22,7 +22,7 @@ use crate::finality_pipeline::SubstrateFinalitySyncPipeline;
 
 use async_trait::async_trait;
 use codec::Decode;
-use finality_relay::TargetClient;
+use finality_relay::{FinalitySyncPipeline, TargetClient};
 use relay_substrate_client::{Chain, Client, Error as SubstrateError};
 use relay_utils::relay_loop::Client as RelayClient;
 
@@ -30,12 +30,13 @@ use relay_utils::relay_loop::Client as RelayClient;
 pub struct SubstrateFinalityTarget<C: Chain, P> {
 	client: Client<C>,
 	pipeline: P,
+	transactions_mortality: Option<u32>,
 }
 
 impl<C: Chain, P> SubstrateFinalityTarget<C, P> {
 	/// Create new Substrate headers target.
-	pub fn new(client: Client<C>, pipeline: P) -> Self {
-		SubstrateFinalityTarget { client, pipeline }
+	pub fn new(client: Client<C>, pipeline: P, transactions_mortality: Option<u32>) -> Self {
+		SubstrateFinalityTarget { client, pipeline, transactions_mortality }
 	}
 }
 
@@ -44,6 +45,7 @@ impl<C: Chain, P: SubstrateFinalitySyncPipeline> Clone for SubstrateFinalityTarg
 		SubstrateFinalityTarget {
 			client: self.client.clone(),
 			pipeline: self.pipeline.clone(),
+			transactions_mortality: self.transactions_mortality,
 		}
 	}
 }
@@ -58,33 +60,53 @@ impl<C: Chain, P: SubstrateFinalitySyncPipeline> RelayClient for SubstrateFinali
 }
 
 #[async_trait]
-impl<C, P> TargetClient<P> for SubstrateFinalityTarget<C, P>
+impl<C, P> TargetClient<P::FinalitySyncPipeline> for SubstrateFinalityTarget<C, P>
 where
 	C: Chain,
-	P::Number: Decode,
-	P::Hash: Decode,
 	P: SubstrateFinalitySyncPipeline<TargetChain = C>,
+	<P::FinalitySyncPipeline as FinalitySyncPipeline>::Number: Decode,
+	<P::FinalitySyncPipeline as FinalitySyncPipeline>::Hash: Decode,
 {
-	async fn best_finalized_source_block_number(&self) -> Result<P::Number, SubstrateError> {
+	async fn best_finalized_source_block_number(
+		&self,
+	) -> Result<<P::FinalitySyncPipeline as FinalitySyncPipeline>::Number, SubstrateError> {
 		// we can't continue to relay finality if target node is out of sync, because
 		// it may have already received (some of) headers that we're going to relay
 		self.client.ensure_synced().await?;
 
-		Ok(crate::messages_source::read_client_state::<C, P::Hash, P::Number>(
-			&self.client,
-			P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET,
-		)
+		Ok(crate::messages_source::read_client_state::<
+			C,
+			<P::FinalitySyncPipeline as FinalitySyncPipeline>::Hash,
+			<P::FinalitySyncPipeline as FinalitySyncPipeline>::Number,
+		>(&self.client, P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET)
 		.await?
 		.best_finalized_peer_at_best_self
 		.0)
 	}
 
-	async fn submit_finality_proof(&self, header: P::Header, proof: P::FinalityProof) -> Result<(), SubstrateError> {
+	async fn submit_finality_proof(
+		&self,
+		header: <P::FinalitySyncPipeline as FinalitySyncPipeline>::Header,
+		proof: <P::FinalitySyncPipeline as FinalitySyncPipeline>::FinalityProof,
+	) -> Result<(), SubstrateError> {
+		let transactions_author = self.pipeline.transactions_author();
+		let pipeline = self.pipeline.clone();
+		let transactions_mortality = self.transactions_mortality;
 		self.client
-			.submit_signed_extrinsic(self.pipeline.transactions_author(), move |transaction_nonce| {
-				self.pipeline
-					.make_submit_finality_proof_transaction(transaction_nonce, header, proof)
-			})
+			.submit_signed_extrinsic(
+				transactions_author,
+				move |best_block_id, transaction_nonce| {
+					pipeline.make_submit_finality_proof_transaction(
+						relay_substrate_client::TransactionEra::new(
+							best_block_id,
+							transactions_mortality,
+						),
+						transaction_nonce,
+						header,
+						proof,
+					)
+				},
+			)
 			.await
 			.map(drop)
 	}
diff --git a/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs b/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs
similarity index 72%
rename from polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs
rename to polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs
index c2eab1bd3534e8f3be8285f09bcc0ac57fab6ac5..2e802c4cb215078cb121449d18093c90d7580d2b 100644
--- a/polkadot/bridges/relays/bin-substrate/src/headers_initialize.rs
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs
@@ -21,10 +21,12 @@
 //! and authorities set from source to target chain. The headers sync starts
 //! with this header.
 
-use bp_header_chain::InitializationData;
+use crate::error::Error;
+
 use bp_header_chain::{
 	find_grandpa_authorities_scheduled_change,
 	justification::{verify_justification, GrandpaJustification},
+	InitializationData,
 };
 use codec::Decode;
 use finality_grandpa::voter_set::VoterSet;
@@ -39,7 +41,9 @@ pub async fn initialize<SourceChain: Chain, TargetChain: Chain>(
 	source_client: Client<SourceChain>,
 	target_client: Client<TargetChain>,
 	target_transactions_signer: TargetChain::AccountId,
-	prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData<SourceChain::Header>) -> Bytes,
+	prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData<SourceChain::Header>) -> Bytes
+		+ Send
+		+ 'static,
 ) {
 	let result = do_initialize(
 		source_client,
@@ -72,8 +76,10 @@ async fn do_initialize<SourceChain: Chain, TargetChain: Chain>(
 	source_client: Client<SourceChain>,
 	target_client: Client<TargetChain>,
 	target_transactions_signer: TargetChain::AccountId,
-	prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData<SourceChain::Header>) -> Bytes,
-) -> Result<TargetChain::Hash, String> {
+	prepare_initialize_transaction: impl FnOnce(TargetChain::Index, InitializationData<SourceChain::Header>) -> Bytes
+		+ Send
+		+ 'static,
+) -> Result<TargetChain::Hash, Error<SourceChain::Hash, <SourceChain::Header as HeaderT>::Number>> {
 	let initialization_data = prepare_initialization_data(source_client).await?;
 	log::info!(
 		target: "bridge",
@@ -84,40 +90,44 @@ async fn do_initialize<SourceChain: Chain, TargetChain: Chain>(
 	);
 
 	let initialization_tx_hash = target_client
-		.submit_signed_extrinsic(target_transactions_signer, move |transaction_nonce| {
+		.submit_signed_extrinsic(target_transactions_signer, move |_, transaction_nonce| {
 			prepare_initialize_transaction(transaction_nonce, initialization_data)
 		})
 		.await
-		.map_err(|err| format!("Failed to submit {} transaction: {:?}", TargetChain::NAME, err))?;
+		.map_err(|err| Error::SubmitTransaction(TargetChain::NAME, err))?;
 	Ok(initialization_tx_hash)
 }
 
 /// Prepare initialization data for the GRANDPA verifier pallet.
 async fn prepare_initialization_data<SourceChain: Chain>(
 	source_client: Client<SourceChain>,
-) -> Result<InitializationData<SourceChain::Header>, String> {
+) -> Result<
+	InitializationData<SourceChain::Header>,
+	Error<SourceChain::Hash, <SourceChain::Header as HeaderT>::Number>,
+> {
 	// In ideal world we just need to get best finalized header and then to read GRANDPA authorities
 	// set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header.
 	//
-	// But now there are problems with this approach - `CurrentSetId` may return invalid value. So here
-	// we're waiting for the next justification, read the authorities set and then try to figure out
-	// the set id with bruteforce.
-	let mut justifications = source_client
+	// But now there are problems with this approach - `CurrentSetId` may return invalid value. So
+	// here we're waiting for the next justification, read the authorities set and then try to
+	// figure out the set id with bruteforce.
+	let justifications = source_client
 		.subscribe_justifications()
 		.await
-		.map_err(|err| format!("Failed to subscribe to {} justifications: {:?}", SourceChain::NAME, err))?;
-
+		.map_err(|err| Error::Subscribe(SourceChain::NAME, err))?;
 	// Read next justification - the header that it finalizes will be used as initial header.
-	let justification = justifications.next().await.ok_or_else(|| {
-		format!(
-			"Failed to read {} justification from the stream: stream has ended unexpectedly",
-			SourceChain::NAME,
-		)
-	})?;
+	let justification = justifications
+		.next()
+		.await
+		.map_err(|e| Error::ReadJustification(SourceChain::NAME, e))
+		.and_then(|justification| {
+			justification.ok_or(Error::ReadJustificationStreamEnded(SourceChain::NAME))
+		})?;
 
 	// Read initial header.
-	let justification: GrandpaJustification<SourceChain::Header> = Decode::decode(&mut &justification.0[..])
-		.map_err(|err| format!("Failed to decode {} justification: {:?}", SourceChain::NAME, err))?;
+	let justification: GrandpaJustification<SourceChain::Header> =
+		Decode::decode(&mut &justification.0[..])
+			.map_err(|err| Error::DecodeJustification(SourceChain::NAME, err))?;
 
 	let (initial_header_hash, initial_header_number) =
 		(justification.commit.target_hash, justification.commit.target_number);
@@ -130,7 +140,8 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	);
 
 	// Read GRANDPA authorities set at initial header.
-	let initial_authorities_set = source_authorities_set(&source_client, initial_header_hash).await?;
+	let initial_authorities_set =
+		source_authorities_set(&source_client, initial_header_hash).await?;
 	log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}",
 		SourceChain::NAME,
 		initial_authorities_set,
@@ -149,7 +160,8 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	);
 	let schedules_change = scheduled_change.is_some();
 	if schedules_change {
-		authorities_for_verification = source_authorities_set(&source_client, *initial_header.parent_hash()).await?;
+		authorities_for_verification =
+			source_authorities_set(&source_client, *initial_header.parent_hash()).await?;
 		log::trace!(
 			target: "bridge",
 			"Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}",
@@ -161,13 +173,8 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 	// Now let's try to guess authorities set id by verifying justification.
 	let mut initial_authorities_set_id = 0;
 	let mut min_possible_block_number = SourceChain::BlockNumber::zero();
-	let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()).ok_or_else(|| {
-		format!(
-			"Read invalid {} authorities set: {:?}",
-			SourceChain::NAME,
-			authorities_for_verification,
-		)
-	})?;
+	let authorities_for_verification = VoterSet::new(authorities_for_verification.clone())
+		.ok_or(Error::ReadInvalidAuthorities(SourceChain::NAME, authorities_for_verification))?;
 	loop {
 		log::trace!(
 			target: "bridge", "Trying {} GRANDPA authorities set id: {}",
@@ -184,26 +191,21 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 		.is_ok();
 
 		if is_valid_set_id {
-			break;
+			break
 		}
 
 		initial_authorities_set_id += 1;
 		min_possible_block_number += One::one();
 		if min_possible_block_number > initial_header_number {
-			// there can't be more authorities set changes than headers => if we have reached `initial_block_number`
-			// and still have not found correct value of `initial_authorities_set_id`, then something
-			// else is broken => fail
-			return Err(format!(
-				"Failed to guess initial {} GRANDPA authorities set id: checked all\
-			possible ids in range [0; {}]",
-				SourceChain::NAME,
-				initial_header_number
-			));
+			// there can't be more authorities set changes than headers => if we have reached
+			// `initial_block_number` and still have not found correct value of
+			// `initial_authorities_set_id`, then something else is broken => fail
+			return Err(Error::GuessInitialAuthorities(SourceChain::NAME, initial_header_number))
 		}
 	}
 
 	Ok(InitializationData {
-		header: initial_header,
+		header: Box::new(initial_header),
 		authority_list: initial_authorities_set,
 		set_id: if schedules_change {
 			initial_authorities_set_id + 1
@@ -218,39 +220,24 @@ async fn prepare_initialization_data<SourceChain: Chain>(
 async fn source_header<SourceChain: Chain>(
 	source_client: &Client<SourceChain>,
 	header_hash: SourceChain::Hash,
-) -> Result<SourceChain::Header, String> {
-	source_client.header_by_hash(header_hash).await.map_err(|err| {
-		format!(
-			"Failed to retrive {} header with hash {}: {:?}",
-			SourceChain::NAME,
-			header_hash,
-			err,
-		)
-	})
+) -> Result<SourceChain::Header, Error<SourceChain::Hash, <SourceChain::Header as HeaderT>::Number>>
+{
+	source_client
+		.header_by_hash(header_hash)
+		.await
+		.map_err(|err| Error::RetrieveHeader(SourceChain::NAME, header_hash, err))
 }
 
 /// Read GRANDPA authorities set at given header.
 async fn source_authorities_set<SourceChain: Chain>(
 	source_client: &Client<SourceChain>,
 	header_hash: SourceChain::Hash,
-) -> Result<GrandpaAuthoritiesSet, String> {
+) -> Result<GrandpaAuthoritiesSet, Error<SourceChain::Hash, <SourceChain::Header as HeaderT>::Number>>
+{
 	let raw_authorities_set = source_client
 		.grandpa_authorities_set(header_hash)
 		.await
-		.map_err(|err| {
-			format!(
-				"Failed to retrive {} GRANDPA authorities set at header {}: {:?}",
-				SourceChain::NAME,
-				header_hash,
-				err,
-			)
-		})?;
-	GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]).map_err(|err| {
-		format!(
-			"Failed to decode {} GRANDPA authorities set at header {}: {:?}",
-			SourceChain::NAME,
-			header_hash,
-			err,
-		)
-	})
+		.map_err(|err| Error::RetrieveAuthorities(SourceChain::NAME, header_hash, err))?;
+	GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..])
+		.map_err(|err| Error::DecodeAuthorities(SourceChain::NAME, header_hash, err))
 }
diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs b/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs
new file mode 100644
index 0000000000000000000000000000000000000000..01f881998ad008abe0e9e9f817e59d54f1b6a4d0
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs
@@ -0,0 +1,35 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Substrate relay helpers
+
+use relay_utils::metrics::{FloatJsonValueMetric, PrometheusError, Registry};
+
+/// Creates standalone token price metric.
+pub fn token_price_metric(
+	registry: &Registry,
+	prefix: Option<&str>,
+	token_id: &str,
+) -> Result<FloatJsonValueMetric, PrometheusError> {
+	FloatJsonValueMetric::new(
+		registry,
+		prefix,
+		format!("https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=btc", token_id),
+		format!("$.{}.btc", token_id),
+		format!("{}_to_base_conversion_rate", token_id.replace("-", "_")),
+		format!("Rate used to convert from {} to some BASE tokens", token_id.to_uppercase()),
+	)
+}
diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs b/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs
new file mode 100644
index 0000000000000000000000000000000000000000..cc066bf501ac6048730809b5c795e077d65dcd3b
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs
@@ -0,0 +1,41 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! The substrate-relay library. Contains public code shared by the substrate relay binaries.
+
+#![warn(missing_docs)]
+
+use std::time::Duration;
+
+pub mod conversion_rate_update;
+pub mod error;
+pub mod finality_pipeline;
+pub mod finality_target;
+pub mod headers_initialize;
+pub mod helpers;
+pub mod messages_lane;
+pub mod messages_source;
+pub mod messages_target;
+pub mod on_demand_headers;
+
+/// Default relay loop stall timeout. If transactions generated by relay are immortal, then
+/// this timeout is used.
+///
+/// There are no strict requirements on block time in Substrate. But we assume here that all
+/// Substrate-based chains will be designed to produce relatively fast (compared to the slowest
+/// blockchains) blocks. So 1 hour seems to be a good guess for (even congested) chains to mine
+/// transaction, or remove it from the pool.
+pub const STALL_TIMEOUT: Duration = Duration::from_secs(60 * 60);
diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5e9564cf95e3c3ac9c43b77c07c22dc41a2a938f
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs
@@ -0,0 +1,380 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Tools for supporting message lanes between two Substrate-based chains.
+
+use crate::{
+	messages_source::SubstrateMessagesProof, messages_target::SubstrateMessagesReceivingProof,
+	on_demand_headers::OnDemandHeadersRelay,
+};
+
+use async_trait::async_trait;
+use bp_messages::{LaneId, MessageNonce};
+use bp_runtime::{AccountIdOf, IndexOf};
+use frame_support::weights::Weight;
+use messages_relay::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	relay_strategy::RelayStrategy,
+};
+use relay_substrate_client::{
+	metrics::{FloatStorageValueMetric, StorageProofOverheadMetric},
+	BlockNumberOf, Chain, Client, HashOf,
+};
+use relay_utils::{
+	metrics::{F64SharedRef, MetricsParams},
+	BlockNumberBase,
+};
+use sp_core::{storage::StorageKey, Bytes};
+use sp_runtime::FixedU128;
+use std::ops::RangeInclusive;
+
+/// Substrate <-> Substrate messages relay parameters.
+pub struct MessagesRelayParams<SC: Chain, SS, TC: Chain, TS, Strategy: RelayStrategy> {
+	/// Messages source client.
+	pub source_client: Client<SC>,
+	/// Sign parameters for messages source chain.
+	pub source_sign: SS,
+	/// Mortality of source transactions.
+	pub source_transactions_mortality: Option<u32>,
+	/// Messages target client.
+	pub target_client: Client<TC>,
+	/// Sign parameters for messages target chain.
+	pub target_sign: TS,
+	/// Mortality of target transactions.
+	pub target_transactions_mortality: Option<u32>,
+	/// Optional on-demand source to target headers relay.
+	pub source_to_target_headers_relay: Option<OnDemandHeadersRelay<SC>>,
+	/// Optional on-demand target to source headers relay.
+	pub target_to_source_headers_relay: Option<OnDemandHeadersRelay<TC>>,
+	/// Identifier of lane that needs to be served.
+	pub lane_id: LaneId,
+	/// Metrics parameters.
+	pub metrics_params: MetricsParams,
+	/// Relay strategy
+	pub relay_strategy: Strategy,
+}
+
+/// Message sync pipeline for Substrate <-> Substrate relays.
+#[async_trait]
+pub trait SubstrateMessageLane: 'static + Clone + Send + Sync {
+	/// Underlying generic message lane.
+	type MessageLane: MessageLane;
+
+	/// Name of the runtime method that returns dispatch weight of outbound messages at the source
+	/// chain.
+	const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str;
+	/// Name of the runtime method that returns latest generated nonce at the source chain.
+	const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str;
+	/// Name of the runtime method that returns latest received (confirmed) nonce at the source
+	/// chain.
+	const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str;
+
+	/// Name of the runtime method that returns latest received nonce at the target chain.
+	const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str;
+	/// Name of the runtime method that returns the latest confirmed (reward-paid) nonce at the
+	/// target chain.
+	const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str;
+	/// Name of the runtime method that returns state of "unrewarded relayers" set at the target
+	/// chain.
+	const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str;
+
+	/// Name of the runtime method that returns id of best finalized source header at target chain.
+	const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str;
+	/// Name of the runtime method that returns id of best finalized target header at source chain.
+	const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str;
+
+	/// Name of the messages pallet as it is declared in the `construct_runtime!()` at source chain.
+	const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str;
+	/// Name of the messages pallet as it is declared in the `construct_runtime!()` at target chain.
+	const MESSAGE_PALLET_NAME_AT_TARGET: &'static str;
+
+	/// Extra weight of the delivery transaction at the target chain, that is paid to cover
+	/// dispatch fee payment.
+	///
+	/// If dispatch fee is paid at the source chain, then this weight is refunded by the
+	/// delivery transaction.
+	const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight;
+
+	/// Source chain.
+	type SourceChain: Chain;
+	/// Target chain.
+	type TargetChain: Chain;
+
+	/// Returns id of account that we're using to sign transactions at target chain (messages
+	/// proof).
+	fn target_transactions_author(&self) -> AccountIdOf<Self::TargetChain>;
+
+	/// Make messages delivery transaction.
+	fn make_messages_delivery_transaction(
+		&self,
+		best_block_id: TargetHeaderIdOf<Self::MessageLane>,
+		transaction_nonce: IndexOf<Self::TargetChain>,
+		generated_at_header: SourceHeaderIdOf<Self::MessageLane>,
+		nonces: RangeInclusive<MessageNonce>,
+		proof: <Self::MessageLane as MessageLane>::MessagesProof,
+	) -> Bytes;
+
+	/// Returns id of account that we're using to sign transactions at source chain (delivery
+	/// proof).
+	fn source_transactions_author(&self) -> AccountIdOf<Self::SourceChain>;
+
+	/// Make messages receiving proof transaction.
+	fn make_messages_receiving_proof_transaction(
+		&self,
+		best_block_id: SourceHeaderIdOf<Self::MessageLane>,
+		transaction_nonce: IndexOf<Self::SourceChain>,
+		generated_at_header: TargetHeaderIdOf<Self::MessageLane>,
+		proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
+	) -> Bytes;
+}
+
+/// Substrate-to-Substrate message lane.
+#[derive(Debug)]
+pub struct SubstrateMessageLaneToSubstrate<
+	Source: Chain,
+	SourceSignParams,
+	Target: Chain,
+	TargetSignParams,
+> {
+	/// Client for the source Substrate chain.
+	pub source_client: Client<Source>,
+	/// Parameters required to sign transactions for source chain.
+	pub source_sign: SourceSignParams,
+	/// Source transactions mortality.
+	pub source_transactions_mortality: Option<u32>,
+	/// Client for the target Substrate chain.
+	pub target_client: Client<Target>,
+	/// Parameters required to sign transactions for target chain.
+	pub target_sign: TargetSignParams,
+	/// Target transactions mortality.
+	pub target_transactions_mortality: Option<u32>,
+	/// Account id of relayer at the source chain.
+	pub relayer_id_at_source: Source::AccountId,
+}
+
+impl<Source: Chain, SourceSignParams: Clone, Target: Chain, TargetSignParams: Clone> Clone
+	for SubstrateMessageLaneToSubstrate<Source, SourceSignParams, Target, TargetSignParams>
+{
+	fn clone(&self) -> Self {
+		Self {
+			source_client: self.source_client.clone(),
+			source_sign: self.source_sign.clone(),
+			source_transactions_mortality: self.source_transactions_mortality,
+			target_client: self.target_client.clone(),
+			target_sign: self.target_sign.clone(),
+			target_transactions_mortality: self.target_transactions_mortality,
+			relayer_id_at_source: self.relayer_id_at_source.clone(),
+		}
+	}
+}
+
+impl<Source: Chain, SourceSignParams, Target: Chain, TargetSignParams> MessageLane
+	for SubstrateMessageLaneToSubstrate<Source, SourceSignParams, Target, TargetSignParams>
+where
+	SourceSignParams: Clone + Send + Sync + 'static,
+	TargetSignParams: Clone + Send + Sync + 'static,
+	BlockNumberOf<Source>: BlockNumberBase,
+	BlockNumberOf<Target>: BlockNumberBase,
+{
+	const SOURCE_NAME: &'static str = Source::NAME;
+	const TARGET_NAME: &'static str = Target::NAME;
+
+	type MessagesProof = SubstrateMessagesProof<Source>;
+	type MessagesReceivingProof = SubstrateMessagesReceivingProof<Target>;
+
+	type SourceChainBalance = Source::Balance;
+	type SourceHeaderNumber = BlockNumberOf<Source>;
+	type SourceHeaderHash = HashOf<Source>;
+
+	type TargetHeaderNumber = BlockNumberOf<Target>;
+	type TargetHeaderHash = HashOf<Target>;
+}
+
+/// Returns maximal number of messages and their maximal cumulative dispatch weight, based
+/// on given chain parameters.
+pub fn select_delivery_transaction_limits<W: pallet_bridge_messages::WeightInfoExt>(
+	max_extrinsic_weight: Weight,
+	max_unconfirmed_messages_at_inbound_lane: MessageNonce,
+) -> (MessageNonce, Weight) {
+	// We may try to guess accurate value, based on maximal number of messages and per-message
+	// weight overhead, but the relay loop isn't using this info in a super-accurate way anyway.
+	// So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is
+	// for messages dispatch.
+
+	// Another thing to keep in mind is that our runtimes (when this code was written) accept
+	// messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than
+	// that for dispatch.
+
+	let weight_for_delivery_tx = max_extrinsic_weight / 3;
+	let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx;
+
+	let delivery_tx_base_weight = W::receive_messages_proof_overhead() +
+		W::receive_messages_proof_outbound_lane_state_overhead();
+	let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight;
+	let max_number_of_messages = std::cmp::min(
+		delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1),
+		max_unconfirmed_messages_at_inbound_lane,
+	);
+
+	assert!(
+		max_number_of_messages > 0,
+		"Relay should fit at least one message in every delivery transaction",
+	);
+	assert!(
+		weight_for_messages_dispatch >= max_extrinsic_weight / 2,
+		"Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2",
+	);
+
+	(max_number_of_messages, weight_for_messages_dispatch)
+}
+
+/// Shared references to the values of standalone metrics of the message lane relay loop.
+#[derive(Debug, Clone)]
+pub struct StandaloneMessagesMetrics {
+	/// Shared reference to the actual target -> <base> chain token conversion rate.
+	pub target_to_base_conversion_rate: Option<F64SharedRef>,
+	/// Shared reference to the actual source -> <base> chain token conversion rate.
+	pub source_to_base_conversion_rate: Option<F64SharedRef>,
+	/// Shared reference to the stored (in the source chain runtime storage) target -> source chain
+	/// conversion rate.
+	pub target_to_source_conversion_rate: Option<F64SharedRef>,
+}
+
+impl StandaloneMessagesMetrics {
+	/// Return conversion rate from target to source tokens.
+	pub async fn target_to_source_conversion_rate(&self) -> Option<f64> {
+		let target_to_base_conversion_rate =
+			(*self.target_to_base_conversion_rate.as_ref()?.read().await)?;
+		let source_to_base_conversion_rate =
+			(*self.source_to_base_conversion_rate.as_ref()?.read().await)?;
+		Some(source_to_base_conversion_rate / target_to_base_conversion_rate)
+	}
+}
+
+/// Add general standalone metrics for the message lane relay loop.
+pub fn add_standalone_metrics<P: SubstrateMessageLane>(
+	metrics_prefix: Option<String>,
+	metrics_params: MetricsParams,
+	source_client: Client<P::SourceChain>,
+	source_chain_token_id: Option<&str>,
+	target_chain_token_id: Option<&str>,
+	target_to_source_conversion_rate_params: Option<(StorageKey, FixedU128)>,
+) -> anyhow::Result<(MetricsParams, StandaloneMessagesMetrics)> {
+	let mut target_to_source_conversion_rate = None;
+	let mut source_to_base_conversion_rate = None;
+	let mut target_to_base_conversion_rate = None;
+	let mut metrics_params = relay_utils::relay_metrics(metrics_prefix, metrics_params)
+		.standalone_metric(|registry, prefix| {
+			StorageProofOverheadMetric::new(
+				registry,
+				prefix,
+				source_client.clone(),
+				format!("{}_storage_proof_overhead", P::SourceChain::NAME.to_lowercase()),
+				format!("{} storage proof overhead", P::SourceChain::NAME),
+			)
+		})?;
+	if let Some((
+		target_to_source_conversion_rate_storage_key,
+		initial_target_to_source_conversion_rate,
+	)) = target_to_source_conversion_rate_params
+	{
+		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
+			let metric = FloatStorageValueMetric::<_, sp_runtime::FixedU128>::new(
+				registry,
+				prefix,
+				source_client,
+				target_to_source_conversion_rate_storage_key,
+				Some(initial_target_to_source_conversion_rate),
+				format!(
+					"{}_{}_to_{}_conversion_rate",
+					P::SourceChain::NAME,
+					P::TargetChain::NAME,
+					P::SourceChain::NAME
+				),
+				format!(
+					"{} to {} tokens conversion rate (used by {})",
+					P::TargetChain::NAME,
+					P::SourceChain::NAME,
+					P::SourceChain::NAME
+				),
+			)?;
+			target_to_source_conversion_rate = Some(metric.shared_value_ref());
+			Ok(metric)
+		})?;
+	}
+	if let Some(source_chain_token_id) = source_chain_token_id {
+		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
+			let metric =
+				crate::helpers::token_price_metric(registry, prefix, source_chain_token_id)?;
+			source_to_base_conversion_rate = Some(metric.shared_value_ref());
+			Ok(metric)
+		})?;
+	}
+	if let Some(target_chain_token_id) = target_chain_token_id {
+		metrics_params = metrics_params.standalone_metric(|registry, prefix| {
+			let metric =
+				crate::helpers::token_price_metric(registry, prefix, target_chain_token_id)?;
+			target_to_base_conversion_rate = Some(metric.shared_value_ref());
+			Ok(metric)
+		})?;
+	}
+	Ok((
+		metrics_params.into_params(),
+		StandaloneMessagesMetrics {
+			target_to_base_conversion_rate,
+			source_to_base_conversion_rate,
+			target_to_source_conversion_rate,
+		},
+	))
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use async_std::sync::{Arc, RwLock};
+
+	type RialtoToMillauMessagesWeights =
+		pallet_bridge_messages::weights::RialtoWeight<rialto_runtime::Runtime>;
+
+	#[test]
+	fn select_delivery_transaction_limits_works() {
+		let (max_count, max_weight) =
+			select_delivery_transaction_limits::<RialtoToMillauMessagesWeights>(
+				bp_millau::max_extrinsic_weight(),
+				bp_millau::MAX_UNREWARDED_RELAYER_ENTRIES_AT_INBOUND_LANE,
+			);
+		assert_eq!(
+			(max_count, max_weight),
+			// We don't actually care about these values, so feel free to update them whenever test
+			// fails. The only thing to do before that is to ensure that the new values look sane:
+			// i.e. weight reserved for messages dispatch allows dispatch of non-trivial messages.
+			//
+			// Any significant change in these values should attract additional attention.
+			(782, 216_583_333_334),
+		);
+	}
+
+	#[async_std::test]
+	async fn target_to_source_conversion_rate_works() {
+		let metrics = StandaloneMessagesMetrics {
+			target_to_base_conversion_rate: Some(Arc::new(RwLock::new(Some(183.15)))),
+			source_to_base_conversion_rate: Some(Arc::new(RwLock::new(Some(12.32)))),
+			target_to_source_conversion_rate: None, // we don't care
+		};
+
+		assert_eq!(metrics.target_to_source_conversion_rate().await, Some(12.32 / 183.15),);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/messages_source.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs
similarity index 53%
rename from polkadot/bridges/relays/bin-substrate/src/messages_source.rs
rename to polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs
index 88c8b529dcc619f082e9d21ce55e77ccb3c212e5..5f066296e7e71517838f1513e63511dba3eda635 100644
--- a/polkadot/bridges/relays/bin-substrate/src/messages_source.rs
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs
@@ -18,26 +18,37 @@
 //! runtime that implements `<BridgedChainName>HeaderApi` to allow bridging with
 //! <BridgedName> chain.
 
-use crate::messages_lane::SubstrateMessageLane;
-use crate::on_demand_headers::OnDemandHeadersRelay;
+use crate::{
+	messages_lane::SubstrateMessageLane, messages_target::SubstrateMessagesReceivingProof,
+	on_demand_headers::OnDemandHeadersRelay,
+};
 
 use async_trait::async_trait;
-use bp_messages::{LaneId, MessageNonce};
-use bp_runtime::{messages::DispatchFeePayment, ChainId};
-use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof;
+use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
+use bridge_runtime_common::messages::{
+	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof,
+};
 use codec::{Decode, Encode};
-use frame_support::{traits::Instance, weights::Weight};
+use frame_support::weights::Weight;
 use messages_relay::{
-	message_lane::{SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
 	message_lane_loop::{
-		ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient, SourceClientState,
+		ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient,
+		SourceClientState,
 	},
 };
-use relay_substrate_client::{Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf};
+use num_traits::{Bounded, Zero};
+use relay_substrate_client::{
+	BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderIdOf, HeaderOf,
+	IndexOf,
+};
 use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId};
 use sp_core::Bytes;
-use sp_runtime::{traits::Header as HeaderT, DeserializeOwned};
-use std::{marker::PhantomData, ops::RangeInclusive};
+use sp_runtime::{
+	traits::{AtLeast32BitUnsigned, Header as HeaderT},
+	DeserializeOwned,
+};
+use std::ops::RangeInclusive;
 
 /// Intermediate message proof returned by the source Substrate node. Includes everything
 /// required to submit to the target node: cumulative dispatch weight of bundled messages and
@@ -45,55 +56,38 @@ use std::{marker::PhantomData, ops::RangeInclusive};
 pub type SubstrateMessagesProof<C> = (Weight, FromBridgedChainMessagesProof<HashOf<C>>);
 
 /// Substrate client as Substrate messages source.
-pub struct SubstrateMessagesSource<C: Chain, P: SubstrateMessageLane, I> {
-	client: Client<C>,
+pub struct SubstrateMessagesSource<P: SubstrateMessageLane> {
+	client: Client<P::SourceChain>,
 	lane: P,
 	lane_id: LaneId,
-	instance: ChainId,
 	target_to_source_headers_relay: Option<OnDemandHeadersRelay<P::TargetChain>>,
-	_phantom: PhantomData<I>,
 }
 
-impl<C: Chain, P: SubstrateMessageLane, I> SubstrateMessagesSource<C, P, I> {
+impl<P: SubstrateMessageLane> SubstrateMessagesSource<P> {
 	/// Create new Substrate headers source.
 	pub fn new(
-		client: Client<C>,
+		client: Client<P::SourceChain>,
 		lane: P,
 		lane_id: LaneId,
-		instance: ChainId,
 		target_to_source_headers_relay: Option<OnDemandHeadersRelay<P::TargetChain>>,
 	) -> Self {
-		SubstrateMessagesSource {
-			client,
-			lane,
-			lane_id,
-			instance,
-			target_to_source_headers_relay,
-			_phantom: Default::default(),
-		}
+		SubstrateMessagesSource { client, lane, lane_id, target_to_source_headers_relay }
 	}
 }
 
-impl<C: Chain, P: SubstrateMessageLane, I> Clone for SubstrateMessagesSource<C, P, I> {
+impl<P: SubstrateMessageLane> Clone for SubstrateMessagesSource<P> {
 	fn clone(&self) -> Self {
 		Self {
 			client: self.client.clone(),
 			lane: self.lane.clone(),
 			lane_id: self.lane_id,
-			instance: self.instance,
 			target_to_source_headers_relay: self.target_to_source_headers_relay.clone(),
-			_phantom: Default::default(),
 		}
 	}
 }
 
 #[async_trait]
-impl<C, P, I> RelayClient for SubstrateMessagesSource<C, P, I>
-where
-	C: Chain,
-	P: SubstrateMessageLane,
-	I: Send + Sync + Instance,
-{
+impl<P: SubstrateMessageLane> RelayClient for SubstrateMessagesSource<P> {
 	type Error = SubstrateError;
 
 	async fn reconnect(&mut self) -> Result<(), SubstrateError> {
@@ -102,40 +96,49 @@ where
 }
 
 #[async_trait]
-impl<C, P, I> SourceClient<P> for SubstrateMessagesSource<C, P, I>
+impl<P> SourceClient<P::MessageLane> for SubstrateMessagesSource<P>
 where
-	C: Chain,
-	C::Header: DeserializeOwned,
-	C::Index: DeserializeOwned,
-	C::BlockNumber: BlockNumberBase,
-	P: SubstrateMessageLane<
-		MessagesProof = SubstrateMessagesProof<C>,
-		SourceChainBalance = C::Balance,
-		SourceHeaderNumber = <C::Header as HeaderT>::Number,
-		SourceHeaderHash = <C::Header as HeaderT>::Hash,
-		SourceChain = C,
+	P: SubstrateMessageLane,
+	P::SourceChain: Chain<
+		Hash = <P::MessageLane as MessageLane>::SourceHeaderHash,
+		BlockNumber = <P::MessageLane as MessageLane>::SourceHeaderNumber,
+		Balance = <P::MessageLane as MessageLane>::SourceChainBalance,
 	>,
-	P::TargetChain: Chain<Hash = P::TargetHeaderHash, BlockNumber = P::TargetHeaderNumber>,
-	P::TargetHeaderNumber: Decode,
-	P::TargetHeaderHash: Decode,
-	I: Send + Sync + Instance,
+	BalanceOf<P::SourceChain>: Decode + Bounded,
+	IndexOf<P::SourceChain>: DeserializeOwned,
+	HashOf<P::SourceChain>: Copy,
+	BlockNumberOf<P::SourceChain>: BlockNumberBase + Copy,
+	HeaderOf<P::SourceChain>: DeserializeOwned,
+	P::TargetChain: Chain<
+		Hash = <P::MessageLane as MessageLane>::TargetHeaderHash,
+		BlockNumber = <P::MessageLane as MessageLane>::TargetHeaderNumber,
+	>,
+
+	P::MessageLane: MessageLane<
+		MessagesProof = SubstrateMessagesProof<P::SourceChain>,
+		MessagesReceivingProof = SubstrateMessagesReceivingProof<P::TargetChain>,
+	>,
+	<P::MessageLane as MessageLane>::TargetHeaderNumber: Decode,
+	<P::MessageLane as MessageLane>::TargetHeaderHash: Decode,
+	<P::MessageLane as MessageLane>::SourceChainBalance: AtLeast32BitUnsigned,
 {
-	async fn state(&self) -> Result<SourceClientState<P>, SubstrateError> {
+	async fn state(&self) -> Result<SourceClientState<P::MessageLane>, SubstrateError> {
 		// we can't continue to deliver confirmations if source node is out of sync, because
 		// it may have already received confirmations that we're going to deliver
 		self.client.ensure_synced().await?;
 
-		read_client_state::<_, P::TargetHeaderHash, P::TargetHeaderNumber>(
-			&self.client,
-			P::BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE,
-		)
+		read_client_state::<
+			_,
+			<P::MessageLane as MessageLane>::TargetHeaderHash,
+			<P::MessageLane as MessageLane>::TargetHeaderNumber,
+		>(&self.client, P::BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE)
 		.await
 	}
 
 	async fn latest_generated_nonce(
 		&self,
-		id: SourceHeaderIdOf<P>,
-	) -> Result<(SourceHeaderIdOf<P>, MessageNonce), SubstrateError> {
+		id: SourceHeaderIdOf<P::MessageLane>,
+	) -> Result<(SourceHeaderIdOf<P::MessageLane>, MessageNonce), SubstrateError> {
 		let encoded_response = self
 			.client
 			.state_call(
@@ -144,15 +147,15 @@ where
 				Some(id.1),
 			)
 			.await?;
-		let latest_generated_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+		let latest_generated_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, latest_generated_nonce))
 	}
 
 	async fn latest_confirmed_received_nonce(
 		&self,
-		id: SourceHeaderIdOf<P>,
-	) -> Result<(SourceHeaderIdOf<P>, MessageNonce), SubstrateError> {
+		id: SourceHeaderIdOf<P::MessageLane>,
+	) -> Result<(SourceHeaderIdOf<P::MessageLane>, MessageNonce), SubstrateError> {
 		let encoded_response = self
 			.client
 			.state_call(
@@ -161,16 +164,19 @@ where
 				Some(id.1),
 			)
 			.await?;
-		let latest_received_nonce: MessageNonce =
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
+		let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
 		Ok((id, latest_received_nonce))
 	}
 
 	async fn generated_message_details(
 		&self,
-		id: SourceHeaderIdOf<P>,
+		id: SourceHeaderIdOf<P::MessageLane>,
 		nonces: RangeInclusive<MessageNonce>,
-	) -> Result<MessageDetailsMap<P::SourceChainBalance>, SubstrateError> {
+	) -> Result<
+		MessageDetailsMap<<P::MessageLane as MessageLane>::SourceChainBalance>,
+		SubstrateError,
+	> {
 		let encoded_response = self
 			.client
 			.state_call(
@@ -180,37 +186,46 @@ where
 			)
 			.await?;
 
-		make_message_details_map::<C>(
-			Decode::decode(&mut &encoded_response.0[..]).map_err(SubstrateError::ResponseParseFailed)?,
+		make_message_details_map::<P::SourceChain>(
+			Decode::decode(&mut &encoded_response.0[..])
+				.map_err(SubstrateError::ResponseParseFailed)?,
 			nonces,
 		)
 	}
 
 	async fn prove_messages(
 		&self,
-		id: SourceHeaderIdOf<P>,
+		id: SourceHeaderIdOf<P::MessageLane>,
 		nonces: RangeInclusive<MessageNonce>,
 		proof_parameters: MessageProofParameters,
-	) -> Result<(SourceHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesProof), SubstrateError> {
-		let mut storage_keys = Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1);
+	) -> Result<
+		(
+			SourceHeaderIdOf<P::MessageLane>,
+			RangeInclusive<MessageNonce>,
+			<P::MessageLane as MessageLane>::MessagesProof,
+		),
+		SubstrateError,
+	> {
+		let mut storage_keys =
+			Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1);
 		let mut message_nonce = *nonces.start();
 		while message_nonce <= *nonces.end() {
-			let message_key = pallet_bridge_messages::storage_keys::message_key::<I>(&self.lane_id, message_nonce);
+			let message_key = pallet_bridge_messages::storage_keys::message_key(
+				P::MESSAGE_PALLET_NAME_AT_SOURCE,
+				&self.lane_id,
+				message_nonce,
+			);
 			storage_keys.push(message_key);
 			message_nonce += 1;
 		}
 		if proof_parameters.outbound_state_proof_required {
-			storage_keys.push(pallet_bridge_messages::storage_keys::outbound_lane_data_key::<I>(
+			storage_keys.push(pallet_bridge_messages::storage_keys::outbound_lane_data_key(
+				P::MESSAGE_PALLET_NAME_AT_SOURCE,
 				&self.lane_id,
 			));
 		}
 
-		let proof = self
-			.client
-			.prove_storage(storage_keys, id.1)
-			.await?
-			.iter_nodes()
-			.collect();
+		let proof = self.client.prove_storage(storage_keys, id.1).await?.iter_nodes().collect();
 		let proof = FromBridgedChainMessagesProof {
 			bridged_header_hash: id.1,
 			storage_proof: proof,
@@ -223,33 +238,87 @@ where
 
 	async fn submit_messages_receiving_proof(
 		&self,
-		generated_at_block: TargetHeaderIdOf<P>,
-		proof: P::MessagesReceivingProof,
+		generated_at_block: TargetHeaderIdOf<P::MessageLane>,
+		proof: <P::MessageLane as MessageLane>::MessagesReceivingProof,
 	) -> Result<(), SubstrateError> {
+		let lane = self.lane.clone();
 		self.client
-			.submit_signed_extrinsic(self.lane.source_transactions_author(), move |transaction_nonce| {
-				self.lane
-					.make_messages_receiving_proof_transaction(transaction_nonce, generated_at_block, proof)
-			})
+			.submit_signed_extrinsic(
+				self.lane.source_transactions_author(),
+				move |best_block_id, transaction_nonce| {
+					lane.make_messages_receiving_proof_transaction(
+						best_block_id,
+						transaction_nonce,
+						generated_at_block,
+						proof,
+					)
+				},
+			)
 			.await?;
 		Ok(())
 	}
 
-	async fn require_target_header_on_source(&self, id: TargetHeaderIdOf<P>) {
+	async fn require_target_header_on_source(&self, id: TargetHeaderIdOf<P::MessageLane>) {
 		if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay {
 			target_to_source_headers_relay.require_finalized_header(id).await;
 		}
 	}
 
-	async fn estimate_confirmation_transaction(&self) -> P::SourceChainBalance {
-		num_traits::Zero::zero() // TODO: https://github.com/paritytech/parity-bridges-common/issues/997
+	async fn estimate_confirmation_transaction(
+		&self,
+	) -> <P::MessageLane as MessageLane>::SourceChainBalance {
+		self.client
+			.estimate_extrinsic_fee(self.lane.make_messages_receiving_proof_transaction(
+				HeaderId(Default::default(), Default::default()),
+				Zero::zero(),
+				HeaderId(Default::default(), Default::default()),
+				prepare_dummy_messages_delivery_proof::<P::SourceChain, P::TargetChain>(),
+			))
+			.await
+			.map(|fee| fee.inclusion_fee())
+			.unwrap_or_else(|_| BalanceOf::<P::SourceChain>::max_value())
 	}
 }
 
+/// Prepare 'dummy' messages delivery proof that will compose the delivery confirmation transaction.
+///
+/// We don't care about the proof actually being a valid proof, because its validity doesn't
+/// affect the call weight - we only care about its size.
+fn prepare_dummy_messages_delivery_proof<SC: Chain, TC: Chain>(
+) -> SubstrateMessagesReceivingProof<TC> {
+	let single_message_confirmation_size = bp_messages::InboundLaneData::<()>::encoded_size_hint(
+		SC::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE,
+		1,
+		1,
+	)
+	.unwrap_or(u32::MAX);
+	let proof_size = TC::STORAGE_PROOF_OVERHEAD.saturating_add(single_message_confirmation_size);
+	(
+		UnrewardedRelayersState {
+			unrewarded_relayer_entries: 1,
+			messages_in_oldest_entry: 1,
+			total_messages: 1,
+		},
+		FromBridgedChainMessagesDeliveryProof {
+			bridged_header_hash: Default::default(),
+			storage_proof: vec![vec![0; proof_size as usize]],
+			lane: Default::default(),
+		},
+	)
+}
+
+/// Read best blocks from given client.
+///
+/// This function assumes that the chain that is followed by the `self_client` has
+/// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name`
+/// runtime API to read the best finalized Bridged chain header.
 pub async fn read_client_state<SelfChain, BridgedHeaderHash, BridgedHeaderNumber>(
 	self_client: &Client<SelfChain>,
 	best_finalized_header_id_method_name: &str,
-) -> Result<ClientState<HeaderIdOf<SelfChain>, HeaderId<BridgedHeaderHash, BridgedHeaderNumber>>, SubstrateError>
+) -> Result<
+	ClientState<HeaderIdOf<SelfChain>, HeaderId<BridgedHeaderHash, BridgedHeaderNumber>>,
+	SubstrateError,
+>
 where
 	SelfChain: Chain,
 	SelfChain::Header: DeserializeOwned,
@@ -259,8 +328,10 @@ where
 {
 	// let's read our state first: we need best finalized header hash on **this** chain
 	let self_best_finalized_header_hash = self_client.best_finalized_header_hash().await?;
-	let self_best_finalized_header = self_client.header_by_hash(self_best_finalized_header_hash).await?;
-	let self_best_finalized_id = HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash);
+	let self_best_finalized_header =
+		self_client.header_by_hash(self_best_finalized_header_hash).await?;
+	let self_best_finalized_id =
+		HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash);
 
 	// now let's read our best header on **this** chain
 	let self_best_header = self_client.best_header().await?;
@@ -276,11 +347,10 @@ where
 		)
 		.await?;
 	let decoded_best_finalized_peer_on_self: (BridgedHeaderNumber, BridgedHeaderHash) =
-		Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]).map_err(SubstrateError::ResponseParseFailed)?;
-	let peer_on_self_best_finalized_id = HeaderId(
-		decoded_best_finalized_peer_on_self.0,
-		decoded_best_finalized_peer_on_self.1,
-	);
+		Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
+	let peer_on_self_best_finalized_id =
+		HeaderId(decoded_best_finalized_peer_on_self.0, decoded_best_finalized_peer_on_self.1);
 
 	Ok(ClientState {
 		best_self: self_best_id,
@@ -295,7 +365,7 @@ fn make_message_details_map<C: Chain>(
 ) -> Result<MessageDetailsMap<C::Balance>, SubstrateError> {
 	let make_missing_nonce_error = |expected_nonce| {
 		Err(SubstrateError::Custom(format!(
-			"Missing nonce {} in messages_dispatch_weight call result. Expected all nonces from {:?}",
+			"Missing nonce {} in message_details call result. Expected all nonces from {:?}",
 			expected_nonce, nonces,
 		)))
 	};
@@ -304,16 +374,14 @@ fn make_message_details_map<C: Chain>(
 
 	// this is actually prevented by external logic
 	if nonces.is_empty() {
-		return Ok(weights_map);
+		return Ok(weights_map)
 	}
 
 	// check if last nonce is missing - loop below is not checking this
-	let last_nonce_is_missing = weights
-		.last()
-		.map(|details| details.nonce != *nonces.end())
-		.unwrap_or(true);
+	let last_nonce_is_missing =
+		weights.last().map(|details| details.nonce != *nonces.end()).unwrap_or(true);
 	if last_nonce_is_missing {
-		return make_missing_nonce_error(*nonces.end());
+		return make_missing_nonce_error(*nonces.end())
 	}
 
 	let mut expected_nonce = *nonces.start();
@@ -325,20 +393,21 @@ fn make_message_details_map<C: Chain>(
 			(false, true) => {
 				// this may happen if some messages were already pruned from the source node
 				//
-				// this is not critical error and will be auto-resolved by messages lane (and target node)
+				// this is not critical error and will be auto-resolved by messages lane (and target
+				// node)
 				log::info!(
 					target: "bridge",
 					"Some messages are missing from the {} node: {:?}. Target node may be out of sync?",
 					C::NAME,
 					expected_nonce..details.nonce,
 				);
-			}
+			},
 			(false, false) => {
 				// some nonces are missing from the middle/tail of the range
 				//
 				// this is critical error, because we can't miss any nonces
-				return make_missing_nonce_error(expected_nonce);
-			}
+				return make_missing_nonce_error(expected_nonce)
+			},
 		}
 
 		weights_map.insert(
@@ -346,9 +415,8 @@ fn make_message_details_map<C: Chain>(
 			MessageDetails {
 				dispatch_weight: details.dispatch_weight,
 				size: details.size as _,
-				// TODO: https://github.com/paritytech/parity-bridges-common/issues/997
-				reward: num_traits::Zero::zero(),
-				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
+				reward: details.delivery_and_dispatch_fee,
+				dispatch_fee_payment: details.dispatch_fee_payment,
 			},
 		);
 		expected_nonce = details.nonce + 1;
@@ -361,10 +429,13 @@ fn make_message_details_map<C: Chain>(
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use bp_runtime::messages::DispatchFeePayment;
+	use relay_rococo_client::Rococo;
+	use relay_wococo_client::Wococo;
 
 	fn message_details_from_rpc(
 		nonces: RangeInclusive<MessageNonce>,
-	) -> Vec<bp_messages::MessageDetails<bp_rialto::Balance>> {
+	) -> Vec<bp_messages::MessageDetails<bp_wococo::Balance>> {
 		nonces
 			.into_iter()
 			.map(|nonce| bp_messages::MessageDetails {
@@ -380,7 +451,7 @@ mod tests {
 	#[test]
 	fn make_message_details_map_succeeds_if_no_messages_are_missing() {
 		assert_eq!(
-			make_message_details_map::<relay_rialto_client::Rialto>(message_details_from_rpc(1..=3), 1..=3,).unwrap(),
+			make_message_details_map::<Wococo>(message_details_from_rpc(1..=3), 1..=3,).unwrap(),
 			vec![
 				(
 					1,
@@ -418,7 +489,7 @@ mod tests {
 	#[test]
 	fn make_message_details_map_succeeds_if_head_messages_are_missing() {
 		assert_eq!(
-			make_message_details_map::<relay_rialto_client::Rialto>(message_details_from_rpc(2..=3), 1..=3,).unwrap(),
+			make_message_details_map::<Wococo>(message_details_from_rpc(2..=3), 1..=3,).unwrap(),
 			vec![
 				(
 					2,
@@ -449,7 +520,7 @@ mod tests {
 		let mut message_details_from_rpc = message_details_from_rpc(1..=3);
 		message_details_from_rpc.remove(1);
 		assert!(matches!(
-			make_message_details_map::<relay_rialto_client::Rialto>(message_details_from_rpc, 1..=3,),
+			make_message_details_map::<Wococo>(message_details_from_rpc, 1..=3,),
 			Err(SubstrateError::Custom(_))
 		));
 	}
@@ -457,7 +528,7 @@ mod tests {
 	#[test]
 	fn make_message_details_map_fails_if_tail_messages_are_missing() {
 		assert!(matches!(
-			make_message_details_map::<relay_rialto_client::Rialto>(message_details_from_rpc(1..=2), 1..=3,),
+			make_message_details_map::<Wococo>(message_details_from_rpc(1..=2), 1..=3,),
 			Err(SubstrateError::Custom(_))
 		));
 	}
@@ -465,8 +536,21 @@ mod tests {
 	#[test]
 	fn make_message_details_map_fails_if_all_messages_are_missing() {
 		assert!(matches!(
-			make_message_details_map::<relay_rialto_client::Rialto>(vec![], 1..=3),
+			make_message_details_map::<Wococo>(vec![], 1..=3),
 			Err(SubstrateError::Custom(_))
 		));
 	}
+
+	#[test]
+	fn prepare_dummy_messages_delivery_proof_works() {
+		let expected_minimal_size =
+			Wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE + Rococo::STORAGE_PROOF_OVERHEAD;
+		let dummy_proof = prepare_dummy_messages_delivery_proof::<Wococo, Rococo>();
+		assert!(
+			dummy_proof.1.encode().len() as u32 > expected_minimal_size,
+			"Expected proof size at least {}. Got: {}",
+			expected_minimal_size,
+			dummy_proof.1.encode().len(),
+		);
+	}
 }
diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs
new file mode 100644
index 0000000000000000000000000000000000000000..6f95ffd12f09cce60882b7c83e79f307e332ec71
--- /dev/null
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs
@@ -0,0 +1,566 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Substrate client as Substrate messages target. The chain we connect to should have
+//! runtime that implements `<BridgedChainName>HeaderApi` to allow bridging with
+//! <BridgedName> chain.
+
+use crate::{
+	messages_lane::{StandaloneMessagesMetrics, SubstrateMessageLane},
+	messages_source::{read_client_state, SubstrateMessagesProof},
+	on_demand_headers::OnDemandHeadersRelay,
+};
+
+use async_trait::async_trait;
+use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState};
+
+use bridge_runtime_common::messages::{
+	source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof,
+};
+use codec::{Decode, Encode};
+use frame_support::weights::{Weight, WeightToFeePolynomial};
+use messages_relay::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane_loop::{TargetClient, TargetClientState},
+};
+use num_traits::{Bounded, Zero};
+use relay_substrate_client::{
+	BalanceOf, BlockNumberOf, Chain, Client, Error as SubstrateError, HashOf, HeaderOf, IndexOf,
+	WeightToFeeOf,
+};
+use relay_utils::{relay_loop::Client as RelayClient, BlockNumberBase, HeaderId};
+use sp_core::Bytes;
+use sp_runtime::{traits::Saturating, DeserializeOwned, FixedPointNumber, FixedU128};
+use std::{convert::TryFrom, ops::RangeInclusive};
+
+/// Message receiving proof returned by the target Substrate node.
+pub type SubstrateMessagesReceivingProof<C> =
+	(UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof<HashOf<C>>);
+
+/// Substrate client as Substrate messages target.
+pub struct SubstrateMessagesTarget<P: SubstrateMessageLane> {
+	client: Client<P::TargetChain>,
+	lane: P,
+	lane_id: LaneId,
+	metric_values: StandaloneMessagesMetrics,
+	source_to_target_headers_relay: Option<OnDemandHeadersRelay<P::SourceChain>>,
+}
+
+impl<P: SubstrateMessageLane> SubstrateMessagesTarget<P> {
+	/// Create new Substrate headers target.
+	pub fn new(
+		client: Client<P::TargetChain>,
+		lane: P,
+		lane_id: LaneId,
+		metric_values: StandaloneMessagesMetrics,
+		source_to_target_headers_relay: Option<OnDemandHeadersRelay<P::SourceChain>>,
+	) -> Self {
+		SubstrateMessagesTarget {
+			client,
+			lane,
+			lane_id,
+			metric_values,
+			source_to_target_headers_relay,
+		}
+	}
+}
+
+impl<P: SubstrateMessageLane> Clone for SubstrateMessagesTarget<P> {
+	fn clone(&self) -> Self {
+		Self {
+			client: self.client.clone(),
+			lane: self.lane.clone(),
+			lane_id: self.lane_id,
+			metric_values: self.metric_values.clone(),
+			source_to_target_headers_relay: self.source_to_target_headers_relay.clone(),
+		}
+	}
+}
+
+#[async_trait]
+impl<P: SubstrateMessageLane> RelayClient for SubstrateMessagesTarget<P> {
+	type Error = SubstrateError;
+
+	async fn reconnect(&mut self) -> Result<(), SubstrateError> {
+		self.client.reconnect().await
+	}
+}
+
+#[async_trait]
+impl<P> TargetClient<P::MessageLane> for SubstrateMessagesTarget<P>
+where
+	P: SubstrateMessageLane,
+	P::SourceChain: Chain<
+		Hash = <P::MessageLane as MessageLane>::SourceHeaderHash,
+		BlockNumber = <P::MessageLane as MessageLane>::SourceHeaderNumber,
+		Balance = <P::MessageLane as MessageLane>::SourceChainBalance,
+	>,
+	BalanceOf<P::SourceChain>: TryFrom<BalanceOf<P::TargetChain>> + Bounded,
+	P::TargetChain: Chain<
+		Hash = <P::MessageLane as MessageLane>::TargetHeaderHash,
+		BlockNumber = <P::MessageLane as MessageLane>::TargetHeaderNumber,
+	>,
+	IndexOf<P::TargetChain>: DeserializeOwned,
+	HashOf<P::TargetChain>: Copy,
+	BlockNumberOf<P::TargetChain>: Copy,
+	HeaderOf<P::TargetChain>: DeserializeOwned,
+	BlockNumberOf<P::TargetChain>: BlockNumberBase,
+	P::MessageLane: MessageLane<
+		MessagesProof = SubstrateMessagesProof<P::SourceChain>,
+		MessagesReceivingProof = SubstrateMessagesReceivingProof<P::TargetChain>,
+	>,
+	<P::MessageLane as MessageLane>::SourceHeaderNumber: Decode,
+	<P::MessageLane as MessageLane>::SourceHeaderHash: Decode,
+{
+	async fn state(&self) -> Result<TargetClientState<P::MessageLane>, SubstrateError> {
+		// we can't continue to deliver messages if target node is out of sync, because
+		// it may have already received (some of) messages that we're going to deliver
+		self.client.ensure_synced().await?;
+
+		read_client_state::<
+			_,
+			<P::MessageLane as MessageLane>::SourceHeaderHash,
+			<P::MessageLane as MessageLane>::SourceHeaderNumber,
+		>(&self.client, P::BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET)
+		.await
+	}
+
+	async fn latest_received_nonce(
+		&self,
+		id: TargetHeaderIdOf<P::MessageLane>,
+	) -> Result<(TargetHeaderIdOf<P::MessageLane>, MessageNonce), SubstrateError> {
+		let encoded_response = self
+			.client
+			.state_call(
+				P::INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD.into(),
+				Bytes(self.lane_id.encode()),
+				Some(id.1),
+			)
+			.await?;
+		let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
+		Ok((id, latest_received_nonce))
+	}
+
+	async fn latest_confirmed_received_nonce(
+		&self,
+		id: TargetHeaderIdOf<P::MessageLane>,
+	) -> Result<(TargetHeaderIdOf<P::MessageLane>, MessageNonce), SubstrateError> {
+		let encoded_response = self
+			.client
+			.state_call(
+				P::INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD.into(),
+				Bytes(self.lane_id.encode()),
+				Some(id.1),
+			)
+			.await?;
+		let latest_received_nonce: MessageNonce = Decode::decode(&mut &encoded_response.0[..])
+			.map_err(SubstrateError::ResponseParseFailed)?;
+		Ok((id, latest_received_nonce))
+	}
+
+	async fn unrewarded_relayers_state(
+		&self,
+		id: TargetHeaderIdOf<P::MessageLane>,
+	) -> Result<(TargetHeaderIdOf<P::MessageLane>, UnrewardedRelayersState), SubstrateError> {
+		let encoded_response = self
+			.client
+			.state_call(
+				P::INBOUND_LANE_UNREWARDED_RELAYERS_STATE.into(),
+				Bytes(self.lane_id.encode()),
+				Some(id.1),
+			)
+			.await?;
+		let unrewarded_relayers_state: UnrewardedRelayersState =
+			Decode::decode(&mut &encoded_response.0[..])
+				.map_err(SubstrateError::ResponseParseFailed)?;
+		Ok((id, unrewarded_relayers_state))
+	}
+
+	async fn prove_messages_receiving(
+		&self,
+		id: TargetHeaderIdOf<P::MessageLane>,
+	) -> Result<
+		(TargetHeaderIdOf<P::MessageLane>, <P::MessageLane as MessageLane>::MessagesReceivingProof),
+		SubstrateError,
+	> {
+		let (id, relayers_state) = self.unrewarded_relayers_state(id).await?;
+		let inbound_data_key = pallet_bridge_messages::storage_keys::inbound_lane_data_key(
+			P::MESSAGE_PALLET_NAME_AT_TARGET,
+			&self.lane_id,
+		);
+		let proof = self
+			.client
+			.prove_storage(vec![inbound_data_key], id.1)
+			.await?
+			.iter_nodes()
+			.collect();
+		let proof = FromBridgedChainMessagesDeliveryProof {
+			bridged_header_hash: id.1,
+			storage_proof: proof,
+			lane: self.lane_id,
+		};
+		Ok((id, (relayers_state, proof)))
+	}
+
+	async fn submit_messages_proof(
+		&self,
+		generated_at_header: SourceHeaderIdOf<P::MessageLane>,
+		nonces: RangeInclusive<MessageNonce>,
+		proof: <P::MessageLane as MessageLane>::MessagesProof,
+	) -> Result<RangeInclusive<MessageNonce>, SubstrateError> {
+		let lane = self.lane.clone();
+		let nonces_clone = nonces.clone();
+		self.client
+			.submit_signed_extrinsic(
+				self.lane.target_transactions_author(),
+				move |best_block_id, transaction_nonce| {
+					lane.make_messages_delivery_transaction(
+						best_block_id,
+						transaction_nonce,
+						generated_at_header,
+						nonces_clone,
+						proof,
+					)
+				},
+			)
+			.await?;
+		Ok(nonces)
+	}
+
+	async fn require_source_header_on_target(&self, id: SourceHeaderIdOf<P::MessageLane>) {
+		if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay {
+			source_to_target_headers_relay.require_finalized_header(id).await;
+		}
+	}
+
+	async fn estimate_delivery_transaction_in_source_tokens(
+		&self,
+		nonces: RangeInclusive<MessageNonce>,
+		total_prepaid_nonces: MessageNonce,
+		total_dispatch_weight: Weight,
+		total_size: u32,
+	) -> Result<<P::MessageLane as MessageLane>::SourceChainBalance, SubstrateError> {
+		let conversion_rate =
+			self.metric_values.target_to_source_conversion_rate().await.ok_or_else(|| {
+				SubstrateError::Custom(format!(
+					"Failed to compute conversion rate from {} to {}",
+					P::TargetChain::NAME,
+					P::SourceChain::NAME,
+				))
+			})?;
+
+		// Prepare 'dummy' delivery transaction - we only care about its length and dispatch weight.
+		let delivery_tx = self.lane.make_messages_delivery_transaction(
+			HeaderId(Default::default(), Default::default()),
+			Zero::zero(),
+			HeaderId(Default::default(), Default::default()),
+			nonces.clone(),
+			prepare_dummy_messages_proof::<P::SourceChain>(
+				nonces.clone(),
+				total_dispatch_weight,
+				total_size,
+			),
+		);
+		let delivery_tx_fee = self.client.estimate_extrinsic_fee(delivery_tx).await?;
+		let inclusion_fee_in_target_tokens = delivery_tx_fee.inclusion_fee();
+
+		// The pre-dispatch cost of delivery transaction includes additional fee to cover dispatch
+		// fee payment (Currency::transfer in regular deployment). But if message dispatch has
+		// already been paid at the Source chain, the delivery transaction will refund relayer with
+		// this additional cost. But `estimate_extrinsic_fee` obviously just returns pre-dispatch
+		// cost of the transaction. So if transaction delivers prepaid message, then it may happen
+		// that pre-dispatch cost is larger than reward and `Rational` relayer will refuse to
+		// deliver this message.
+		//
+		// The most obvious solution would be to deduct total weight of dispatch fee payments from
+		// the `total_dispatch_weight` and use regular `estimate_extrinsic_fee` call. But what if
+		// `total_dispatch_weight` is less than total dispatch fee payments weight? Weight is
+		// strictly positive, so we can't use this option.
+		//
+		// Instead we'll be directly using `WeightToFee` and `NextFeeMultiplier` of the Target
+		// chain. This requires more knowledge of the Target chain, but seems there's no better way
+		// to solve this now.
+		let expected_refund_in_target_tokens = if total_prepaid_nonces != 0 {
+			const WEIGHT_DIFFERENCE: Weight = 100;
+
+			let larger_dispatch_weight = total_dispatch_weight.saturating_add(WEIGHT_DIFFERENCE);
+			let larger_delivery_tx_fee = self
+				.client
+				.estimate_extrinsic_fee(self.lane.make_messages_delivery_transaction(
+					HeaderId(Default::default(), Default::default()),
+					Zero::zero(),
+					HeaderId(Default::default(), Default::default()),
+					nonces.clone(),
+					prepare_dummy_messages_proof::<P::SourceChain>(
+						nonces.clone(),
+						larger_dispatch_weight,
+						total_size,
+					),
+				))
+				.await?;
+
+			compute_prepaid_messages_refund::<P>(
+				total_prepaid_nonces,
+				compute_fee_multiplier::<P::TargetChain>(
+					delivery_tx_fee.adjusted_weight_fee,
+					total_dispatch_weight,
+					larger_delivery_tx_fee.adjusted_weight_fee,
+					larger_dispatch_weight,
+				),
+			)
+		} else {
+			Zero::zero()
+		};
+
+		let delivery_fee_in_source_tokens =
+			convert_target_tokens_to_source_tokens::<P::SourceChain, P::TargetChain>(
+				FixedU128::from_float(conversion_rate),
+				inclusion_fee_in_target_tokens.saturating_sub(expected_refund_in_target_tokens),
+			);
+
+		log::trace!(
+			target: "bridge",
+			"Estimated {} -> {} messages delivery transaction.\n\t\
+				Total nonces: {:?}\n\t\
+				Prepaid messages: {}\n\t\
+				Total messages size: {}\n\t\
+				Total messages dispatch weight: {}\n\t\
+				Inclusion fee (in {1} tokens): {:?}\n\t\
+				Expected refund (in {1} tokens): {:?}\n\t\
+				{1} -> {0} conversion rate: {:?}\n\t\
+				Expected delivery tx fee (in {0} tokens): {:?}",
+				P::SourceChain::NAME,
+				P::TargetChain::NAME,
+				nonces,
+				total_prepaid_nonces,
+				total_size,
+				total_dispatch_weight,
+				inclusion_fee_in_target_tokens,
+				expected_refund_in_target_tokens,
+				conversion_rate,
+				delivery_fee_in_source_tokens,
+		);
+
+		Ok(delivery_fee_in_source_tokens)
+	}
+}
+
+/// Prepare 'dummy' messages proof that will compose the delivery transaction.
+///
+/// We don't care about proof actually being the valid proof, because its validity doesn't
+/// affect the call weight - we only care about its size.
+fn prepare_dummy_messages_proof<SC: Chain>(
+	nonces: RangeInclusive<MessageNonce>,
+	total_dispatch_weight: Weight,
+	total_size: u32,
+) -> SubstrateMessagesProof<SC> {
+	(
+		total_dispatch_weight,
+		FromBridgedChainMessagesProof {
+			bridged_header_hash: Default::default(),
+			storage_proof: vec![vec![
+				0;
+				SC::STORAGE_PROOF_OVERHEAD.saturating_add(total_size) as usize
+			]],
+			lane: Default::default(),
+			nonces_start: *nonces.start(),
+			nonces_end: *nonces.end(),
+		},
+	)
+}
+
+/// Given delivery transaction fee in target chain tokens and conversion rate to the source
+/// chain tokens, compute transaction cost in source chain tokens.
+fn convert_target_tokens_to_source_tokens<SC: Chain, TC: Chain>(
+	target_to_source_conversion_rate: FixedU128,
+	target_transaction_fee: TC::Balance,
+) -> SC::Balance
+where
+	SC::Balance: TryFrom<TC::Balance>,
+{
+	SC::Balance::try_from(
+		target_to_source_conversion_rate.saturating_mul_int(target_transaction_fee),
+	)
+	.unwrap_or_else(|_| SC::Balance::max_value())
+}
+
+/// Compute fee multiplier that is used by the chain, given a couple of fees for transactions
+/// that are only differ in dispatch weights.
+///
+/// This function assumes that standard transaction payment pallet is used by the chain.
+/// The only fee component that depends on dispatch weight is the `adjusted_weight_fee`.
+///
+/// **WARNING**: this functions will only be accurate if weight-to-fee conversion function
+/// is linear. For non-linear polynomials the error will grow with `weight_difference` growth.
+/// So better to use smaller differences.
+fn compute_fee_multiplier<C: Chain>(
+	smaller_adjusted_weight_fee: BalanceOf<C>,
+	smaller_tx_weight: Weight,
+	larger_adjusted_weight_fee: BalanceOf<C>,
+	larger_tx_weight: Weight,
+) -> FixedU128 {
+	let adjusted_weight_fee_difference =
+		larger_adjusted_weight_fee.saturating_sub(smaller_adjusted_weight_fee);
+	let smaller_tx_unadjusted_weight_fee = WeightToFeeOf::<C>::calc(&smaller_tx_weight);
+	let larger_tx_unadjusted_weight_fee = WeightToFeeOf::<C>::calc(&larger_tx_weight);
+	FixedU128::saturating_from_rational(
+		adjusted_weight_fee_difference,
+		larger_tx_unadjusted_weight_fee.saturating_sub(smaller_tx_unadjusted_weight_fee),
+	)
+}
+
+/// Compute fee that will be refunded to the relayer because dispatch of `total_prepaid_nonces`
+/// messages has been paid at the source chain.
+fn compute_prepaid_messages_refund<P: SubstrateMessageLane>(
+	total_prepaid_nonces: MessageNonce,
+	fee_multiplier: FixedU128,
+) -> BalanceOf<P::TargetChain> {
+	fee_multiplier.saturating_mul_int(WeightToFeeOf::<P::TargetChain>::calc(
+		&P::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN.saturating_mul(total_prepaid_nonces),
+	))
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use relay_rococo_client::{Rococo, SigningParams as RococoSigningParams};
+	use relay_wococo_client::{SigningParams as WococoSigningParams, Wococo};
+
+	#[derive(Clone)]
+	struct TestSubstrateMessageLane;
+
+	impl SubstrateMessageLane for TestSubstrateMessageLane {
+		type MessageLane = crate::messages_lane::SubstrateMessageLaneToSubstrate<
+			Rococo,
+			RococoSigningParams,
+			Wococo,
+			WococoSigningParams,
+		>;
+
+		const OUTBOUND_LANE_MESSAGE_DETAILS_METHOD: &'static str = "";
+		const OUTBOUND_LANE_LATEST_GENERATED_NONCE_METHOD: &'static str = "";
+		const OUTBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = "";
+
+		const INBOUND_LANE_LATEST_RECEIVED_NONCE_METHOD: &'static str = "";
+		const INBOUND_LANE_LATEST_CONFIRMED_NONCE_METHOD: &'static str = "";
+		const INBOUND_LANE_UNREWARDED_RELAYERS_STATE: &'static str = "";
+
+		const BEST_FINALIZED_SOURCE_HEADER_ID_AT_TARGET: &'static str = "";
+		const BEST_FINALIZED_TARGET_HEADER_ID_AT_SOURCE: &'static str = "";
+
+		const MESSAGE_PALLET_NAME_AT_SOURCE: &'static str = "";
+		const MESSAGE_PALLET_NAME_AT_TARGET: &'static str = "";
+
+		const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN: Weight = 100_000;
+
+		type SourceChain = Rococo;
+		type TargetChain = Wococo;
+
+		fn source_transactions_author(&self) -> bp_rococo::AccountId {
+			unreachable!()
+		}
+
+		fn make_messages_receiving_proof_transaction(
+			&self,
+			_best_block_id: SourceHeaderIdOf<Self::MessageLane>,
+			_transaction_nonce: IndexOf<Rococo>,
+			_generated_at_block: TargetHeaderIdOf<Self::MessageLane>,
+			_proof: <Self::MessageLane as MessageLane>::MessagesReceivingProof,
+		) -> Bytes {
+			unreachable!()
+		}
+
+		fn target_transactions_author(&self) -> bp_wococo::AccountId {
+			unreachable!()
+		}
+
+		fn make_messages_delivery_transaction(
+			&self,
+			_best_block_id: TargetHeaderIdOf<Self::MessageLane>,
+			_transaction_nonce: IndexOf<Wococo>,
+			_generated_at_header: SourceHeaderIdOf<Self::MessageLane>,
+			_nonces: RangeInclusive<MessageNonce>,
+			_proof: <Self::MessageLane as MessageLane>::MessagesProof,
+		) -> Bytes {
+			unreachable!()
+		}
+	}
+
+	#[test]
+	fn prepare_dummy_messages_proof_works() {
+		const DISPATCH_WEIGHT: Weight = 1_000_000;
+		const SIZE: u32 = 1_000;
+		let dummy_proof = prepare_dummy_messages_proof::<Rococo>(1..=10, DISPATCH_WEIGHT, SIZE);
+		assert_eq!(dummy_proof.0, DISPATCH_WEIGHT);
+		assert!(
+			dummy_proof.1.encode().len() as u32 > SIZE,
+			"Expected proof size at least {}. Got: {}",
+			SIZE,
+			dummy_proof.1.encode().len(),
+		);
+	}
+
+	#[test]
+	fn convert_target_tokens_to_source_tokens_works() {
+		assert_eq!(
+			convert_target_tokens_to_source_tokens::<Rococo, Wococo>((150, 100).into(), 1_000),
+			1_500
+		);
+		assert_eq!(
+			convert_target_tokens_to_source_tokens::<Rococo, Wococo>((50, 100).into(), 1_000),
+			500
+		);
+		assert_eq!(
+			convert_target_tokens_to_source_tokens::<Rococo, Wococo>((100, 100).into(), 1_000),
+			1_000
+		);
+	}
+
+	#[test]
+	fn compute_fee_multiplier_returns_sane_results() {
+		let multiplier = FixedU128::saturating_from_rational(1, 1000);
+
+		let smaller_weight = 1_000_000;
+		let smaller_adjusted_weight_fee =
+			multiplier.saturating_mul_int(WeightToFeeOf::<Rococo>::calc(&smaller_weight));
+
+		let larger_weight = smaller_weight + 200_000;
+		let larger_adjusted_weight_fee =
+			multiplier.saturating_mul_int(WeightToFeeOf::<Rococo>::calc(&larger_weight));
+
+		assert_eq!(
+			compute_fee_multiplier::<Rococo>(
+				smaller_adjusted_weight_fee,
+				smaller_weight,
+				larger_adjusted_weight_fee,
+				larger_weight,
+			),
+			multiplier,
+		);
+	}
+
+	#[test]
+	fn compute_prepaid_messages_refund_returns_sane_results() {
+		assert!(
+			compute_prepaid_messages_refund::<TestSubstrateMessageLane>(
+				10,
+				FixedU128::saturating_from_rational(110, 100),
+			) > (10 * TestSubstrateMessageLane::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_TARGET_CHAIN)
+				.into()
+		);
+	}
+}
diff --git a/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs b/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs
similarity index 78%
rename from polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs
rename to polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs
index 4a2b04328b8624a47e9cc515832e0c53d8ddab2b..ee141866eb97d5d4ff3c82874b4d9b5296b1c88c 100644
--- a/polkadot/bridges/relays/bin-substrate/src/on_demand_headers.rs
+++ b/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs
@@ -16,32 +16,38 @@
 
 //! On-demand Substrate -> Substrate headers relay.
 
-use crate::finality_pipeline::{
-	SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT, STALL_TIMEOUT,
-};
-use crate::finality_target::SubstrateFinalityTarget;
+use std::fmt::Debug;
 
 use async_std::sync::{Arc, Mutex};
-use bp_header_chain::justification::GrandpaJustification;
+use futures::{select, FutureExt};
+use num_traits::{CheckedSub, One, Zero};
+
 use finality_relay::{
 	FinalitySyncParams, FinalitySyncPipeline, SourceClient as FinalitySourceClient, SourceHeader,
 	TargetClient as FinalityTargetClient,
 };
-use futures::{select, FutureExt};
-use num_traits::{CheckedSub, One, Zero};
 use relay_substrate_client::{
 	finality_source::{FinalitySource as SubstrateFinalitySource, RequiredHeaderNumberRef},
-	BlockNumberOf, Chain, Client, HashOf, HeaderIdOf, SyncHeader,
+	Chain, Client, HeaderIdOf, SyncHeader,
 };
 use relay_utils::{
-	metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, MaybeConnectionError,
+	metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient,
+	MaybeConnectionError,
+};
+
+use crate::{
+	finality_pipeline::{
+		SubstrateFinalitySyncPipeline, SubstrateFinalityToSubstrate, RECENT_FINALITY_PROOFS_LIMIT,
+	},
+	finality_target::SubstrateFinalityTarget,
+	STALL_TIMEOUT,
 };
-use std::fmt::Debug;
 
 /// On-demand Substrate <-> Substrate headers relay.
 ///
-/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages relay) needs
-/// it to continue its regular work. When enough headers are relayed, on-demand stops syncing headers.
+/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages
+/// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops
+/// syncing headers.
 #[derive(Clone)]
 pub struct OnDemandHeadersRelay<SourceChain: Chain> {
 	/// Relay task name.
@@ -52,11 +58,13 @@ pub struct OnDemandHeadersRelay<SourceChain: Chain> {
 
 impl<SourceChain: Chain> OnDemandHeadersRelay<SourceChain> {
 	/// Create new on-demand headers relay.
-	pub fn new<TargetChain: Chain, TargetSign>(
+	pub fn new<TargetChain: Chain, TargetSign, P>(
 		source_client: Client<SourceChain>,
 		target_client: Client<TargetChain>,
-		pipeline: SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>,
+		target_transactions_mortality: Option<u32>,
+		pipeline: P,
 		maximal_headers_difference: SourceChain::BlockNumber,
+		only_mandatory_headers: bool,
 	) -> Self
 	where
 		SourceChain: Chain + Debug,
@@ -64,15 +72,14 @@ impl<SourceChain: Chain> OnDemandHeadersRelay<SourceChain> {
 		TargetChain: Chain + Debug,
 		TargetChain::BlockNumber: BlockNumberBase,
 		TargetSign: Clone + Send + Sync + 'static,
-		SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>: SubstrateFinalitySyncPipeline<
-			Hash = HashOf<SourceChain>,
-			Number = BlockNumberOf<SourceChain>,
-			Header = SyncHeader<SourceChain::Header>,
-			FinalityProof = GrandpaJustification<SourceChain::Header>,
+		P: SubstrateFinalitySyncPipeline<
+			FinalitySyncPipeline = SubstrateFinalityToSubstrate<
+				SourceChain,
+				TargetChain,
+				TargetSign,
+			>,
 			TargetChain = TargetChain,
 		>,
-		SubstrateFinalityTarget<TargetChain, SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>>:
-			FinalityTargetClient<SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>>,
 	{
 		let required_header_number = Arc::new(Mutex::new(Zero::zero()));
 		let this = OnDemandHeadersRelay {
@@ -83,8 +90,10 @@ impl<SourceChain: Chain> OnDemandHeadersRelay<SourceChain> {
 			background_task(
 				source_client,
 				target_client,
+				target_transactions_mortality,
 				pipeline,
 				maximal_headers_difference,
+				only_mandatory_headers,
 				required_header_number,
 			)
 			.await;
@@ -111,11 +120,13 @@ impl<SourceChain: Chain> OnDemandHeadersRelay<SourceChain> {
 }
 
 /// Background task that is responsible for starting headers relay.
-async fn background_task<SourceChain, TargetChain, TargetSign>(
+async fn background_task<SourceChain, TargetChain, TargetSign, P>(
 	source_client: Client<SourceChain>,
 	target_client: Client<TargetChain>,
-	pipeline: SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>,
+	target_transactions_mortality: Option<u32>,
+	pipeline: P,
 	maximal_headers_difference: SourceChain::BlockNumber,
+	only_mandatory_headers: bool,
 	required_header_number: RequiredHeaderNumberRef<SourceChain>,
 ) where
 	SourceChain: Chain + Debug,
@@ -123,22 +134,21 @@ async fn background_task<SourceChain, TargetChain, TargetSign>(
 	TargetChain: Chain + Debug,
 	TargetChain::BlockNumber: BlockNumberBase,
 	TargetSign: Clone + Send + Sync + 'static,
-	SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>: SubstrateFinalitySyncPipeline<
-		Hash = HashOf<SourceChain>,
-		Number = BlockNumberOf<SourceChain>,
-		Header = SyncHeader<SourceChain::Header>,
-		FinalityProof = GrandpaJustification<SourceChain::Header>,
+	P: SubstrateFinalitySyncPipeline<
+		FinalitySyncPipeline = SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>,
 		TargetChain = TargetChain,
 	>,
-	SubstrateFinalityTarget<TargetChain, SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>>:
-		FinalityTargetClient<SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>>,
 {
 	let relay_task_name = on_demand_headers_relay_name::<SourceChain, TargetChain>();
 	let mut finality_source = SubstrateFinalitySource::<
 		_,
 		SubstrateFinalityToSubstrate<SourceChain, TargetChain, TargetSign>,
 	>::new(source_client.clone(), Some(required_header_number.clone()));
-	let mut finality_target = SubstrateFinalityTarget::new(target_client.clone(), pipeline.clone());
+	let mut finality_target = SubstrateFinalityTarget::new(
+		target_client.clone(),
+		pipeline.clone(),
+		target_transactions_mortality,
+	);
 	let mut latest_non_mandatory_at_source = Zero::zero();
 
 	let mut restart_relay = true;
@@ -165,12 +175,16 @@ async fn background_task<SourceChain, TargetChain, TargetSign>(
 				&mut finality_target,
 			)
 			.await;
-			continue;
+			continue
 		}
 
 		// read best finalized source header number from target
-		let best_finalized_source_header_at_target =
-			best_finalized_source_header_at_target::<SourceChain, _, _>(&finality_target, &relay_task_name).await;
+		let best_finalized_source_header_at_target = best_finalized_source_header_at_target::<
+			SourceChain,
+			_,
+			_,
+		>(&finality_target, &relay_task_name)
+		.await;
 		if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) {
 			relay_utils::relay_loop::reconnect_failed_client(
 				FailedClient::Target,
@@ -179,11 +193,12 @@ async fn background_task<SourceChain, TargetChain, TargetSign>(
 				&mut finality_target,
 			)
 			.await;
-			continue;
+			continue
 		}
 
 		// submit mandatory header if some headers are missing
-		let best_finalized_source_header_at_target_fmt = format!("{:?}", best_finalized_source_header_at_target);
+		let best_finalized_source_header_at_target_fmt =
+			format!("{:?}", best_finalized_source_header_at_target);
 		let mandatory_scan_range = mandatory_headers_scan_range::<SourceChain>(
 			best_finalized_source_header_at_source.ok(),
 			best_finalized_source_header_at_target.ok(),
@@ -209,8 +224,8 @@ async fn background_task<SourceChain, TargetChain, TargetSign>(
 					// there are no (or we don't need to relay them) mandatory headers in the range
 					// => to avoid scanning the same headers over and over again, remember that
 					latest_non_mandatory_at_source = mandatory_scan_range.1;
-				}
-				Err(e) => {
+				},
+				Err(e) =>
 					if e.is_connection_error() {
 						relay_utils::relay_loop::reconnect_failed_client(
 							FailedClient::Source,
@@ -219,9 +234,8 @@ async fn background_task<SourceChain, TargetChain, TargetSign>(
 							&mut finality_target,
 						)
 						.await;
-						continue;
-					}
-				}
+						continue
+					},
 			}
 		}
 
@@ -232,10 +246,13 @@ async fn background_task<SourceChain, TargetChain, TargetSign>(
 					finality_source.clone(),
 					finality_target.clone(),
 					FinalitySyncParams {
-						tick: std::cmp::max(SourceChain::AVERAGE_BLOCK_INTERVAL, TargetChain::AVERAGE_BLOCK_INTERVAL),
+						tick: std::cmp::max(
+							SourceChain::AVERAGE_BLOCK_INTERVAL,
+							TargetChain::AVERAGE_BLOCK_INTERVAL,
+						),
 						recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT,
 						stall_timeout: STALL_TIMEOUT,
-						only_mandatory_headers: false,
+						only_mandatory_headers,
 					},
 					MetricsParams::disabled(),
 					futures::future::pending(),
@@ -281,12 +298,12 @@ async fn mandatory_headers_scan_range<C: Chain>(
 		.checked_sub(&best_finalized_source_header_at_target)
 		.unwrap_or_else(Zero::zero);
 	if current_headers_difference <= maximal_headers_difference {
-		return None;
+		return None
 	}
 
 	// if relay is already asked to sync headers, don't do anything yet
 	if required_header_number > best_finalized_source_header_at_target {
-		return None;
+		return None
 	}
 
 	Some((
@@ -295,7 +312,8 @@ async fn mandatory_headers_scan_range<C: Chain>(
 	))
 }
 
-/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay it.
+/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay
+/// it.
 ///
 /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise.
 async fn relay_mandatory_header_from_range<SourceChain: Chain, P>(
@@ -310,7 +328,8 @@ where
 	P: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
 {
 	// search for mandatory header first
-	let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?;
+	let mandatory_source_header_number =
+		find_mandatory_header_in_range(finality_source, range).await?;
 
 	// if there are no mandatory headers - we have nothing to do
 	let mandatory_source_header_number = match mandatory_source_header_number {
@@ -322,7 +341,7 @@ where
 	// less than our `mandatory_source_header_number` before logging anything
 	let mut required_header_number = required_header_number.lock().await;
 	if *required_header_number >= mandatory_source_header_number {
-		return Ok(false);
+		return Ok(false)
 	}
 
 	log::trace!(
@@ -350,19 +369,16 @@ where
 	SubstrateFinalitySource<SourceChain, P>: FinalitySourceClient<P>,
 	P: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
 {
-	finality_source
-		.on_chain_best_finalized_block_number()
-		.await
-		.map_err(|error| {
-			log::error!(
-				target: "bridge",
-				"Failed to read best finalized source header from source in {} relay: {:?}",
-				relay_task_name,
-				error,
-			);
+	finality_source.on_chain_best_finalized_block_number().await.map_err(|error| {
+		log::error!(
+			target: "bridge",
+			"Failed to read best finalized source header from source in {} relay: {:?}",
+			relay_task_name,
+			error,
+		);
 
-			error
-		})
+		error
+	})
 }
 
 /// Read best finalized source block number from target client.
@@ -373,22 +389,20 @@ async fn best_finalized_source_header_at_target<SourceChain: Chain, TargetChain:
 	relay_task_name: &str,
 ) -> Result<SourceChain::BlockNumber, <SubstrateFinalityTarget<TargetChain, P> as RelayClient>::Error>
 where
-	SubstrateFinalityTarget<TargetChain, P>: FinalityTargetClient<P>,
-	P: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
+	SubstrateFinalityTarget<TargetChain, P>: FinalityTargetClient<P::FinalitySyncPipeline>,
+	P: SubstrateFinalitySyncPipeline,
+	P::FinalitySyncPipeline: FinalitySyncPipeline<Number = SourceChain::BlockNumber>,
 {
-	finality_target
-		.best_finalized_source_block_number()
-		.await
-		.map_err(|error| {
-			log::error!(
-				target: "bridge",
-				"Failed to read best finalized source header from target in {} relay: {:?}",
-				relay_task_name,
-				error,
-			);
+	finality_target.best_finalized_source_block_number().await.map_err(|error| {
+		log::error!(
+			target: "bridge",
+			"Failed to read best finalized source header from target in {} relay: {:?}",
+			relay_task_name,
+			error,
+		);
 
-			error
-		})
+		error
+	})
 }
 
 /// Read first mandatory header in given inclusive range.
@@ -404,9 +418,10 @@ where
 {
 	let mut current = range.0;
 	while current <= range.1 {
-		let header: SyncHeader<SourceChain::Header> = finality_source.client().header_by_number(current).await?.into();
+		let header: SyncHeader<SourceChain::Header> =
+			finality_source.client().header_by_number(current).await?.into();
 		if header.is_mandatory() {
-			return Ok(Some(current));
+			return Ok(Some(current))
 		}
 
 		current += One::one();
@@ -424,15 +439,21 @@ fn on_demand_headers_relay_name<SourceChain: Chain, TargetChain: Chain>() -> Str
 mod tests {
 	use super::*;
 
-	type TestChain = relay_millau_client::Millau;
+	type TestChain = relay_rococo_client::Rococo;
 
-	const AT_SOURCE: Option<bp_millau::BlockNumber> = Some(10);
-	const AT_TARGET: Option<bp_millau::BlockNumber> = Some(1);
+	const AT_SOURCE: Option<bp_rococo::BlockNumber> = Some(10);
+	const AT_TARGET: Option<bp_rococo::BlockNumber> = Some(1);
 
 	#[async_std::test]
 	async fn mandatory_headers_scan_range_selects_range_if_too_many_headers_are_missing() {
 		assert_eq!(
-			mandatory_headers_scan_range::<TestChain>(AT_SOURCE, AT_TARGET, 5, &Arc::new(Mutex::new(0))).await,
+			mandatory_headers_scan_range::<TestChain>(
+				AT_SOURCE,
+				AT_TARGET,
+				5,
+				&Arc::new(Mutex::new(0))
+			)
+			.await,
 			Some((AT_TARGET.unwrap() + 1, AT_SOURCE.unwrap())),
 		);
 	}
@@ -440,7 +461,13 @@ mod tests {
 	#[async_std::test]
 	async fn mandatory_headers_scan_range_selects_nothing_if_enough_headers_are_relayed() {
 		assert_eq!(
-			mandatory_headers_scan_range::<TestChain>(AT_SOURCE, AT_TARGET, 10, &Arc::new(Mutex::new(0))).await,
+			mandatory_headers_scan_range::<TestChain>(
+				AT_SOURCE,
+				AT_TARGET,
+				10,
+				&Arc::new(Mutex::new(0))
+			)
+			.await,
 			None,
 		);
 	}
diff --git a/polkadot/bridges/relays/messages/src/lib.rs b/polkadot/bridges/relays/messages/src/lib.rs
index cdd94bca9541260a4705256f67e9ff4c65fda873..c9e460300342f46973fe631382a05793478a87eb 100644
--- a/polkadot/bridges/relays/messages/src/lib.rs
+++ b/polkadot/bridges/relays/messages/src/lib.rs
@@ -18,7 +18,7 @@
 //! data. Message lane allows sending arbitrary messages between bridged chains. This
 //! module provides entrypoint that starts reading messages from given message lane
 //! of source chain and submits proof-of-message-at-source-chain transactions to the
-//! target chain. Additionaly, proofs-of-messages-delivery are sent back from the
+//! target chain. Additionally, proofs-of-messages-delivery are sent back from the
 //! target chain to the source chain.
 
 // required for futures::select!
@@ -29,6 +29,7 @@ mod metrics;
 
 pub mod message_lane;
 pub mod message_lane_loop;
+pub mod relay_strategy;
 
 mod message_race_delivery;
 mod message_race_loop;
diff --git a/polkadot/bridges/relays/messages/src/message_lane.rs b/polkadot/bridges/relays/messages/src/message_lane.rs
index 8757e9322ce4473c230cf34a0e631aac94017d30..2b2d8029fc74b8237efebead2782315af43921e8 100644
--- a/polkadot/bridges/relays/messages/src/message_lane.rs
+++ b/polkadot/bridges/relays/messages/src/message_lane.rs
@@ -21,7 +21,7 @@
 
 use num_traits::{SaturatingAdd, Zero};
 use relay_utils::{BlockNumberBase, HeaderId};
-use std::fmt::Debug;
+use std::{fmt::Debug, ops::Sub};
 
 /// One-way message lane.
 pub trait MessageLane: 'static + Clone + Send + Sync {
@@ -40,7 +40,15 @@ pub trait MessageLane: 'static + Clone + Send + Sync {
 	/// 1) pay transaction fees;
 	/// 2) pay message delivery and dispatch fee;
 	/// 3) pay relayer rewards.
-	type SourceChainBalance: Clone + Copy + Debug + PartialOrd + SaturatingAdd + Zero + Send + Sync;
+	type SourceChainBalance: Clone
+		+ Copy
+		+ Debug
+		+ PartialOrd
+		+ Sub<Output = Self::SourceChainBalance>
+		+ SaturatingAdd
+		+ Zero
+		+ Send
+		+ Sync;
 	/// Number of the source header.
 	type SourceHeaderNumber: BlockNumberBase;
 	/// Hash of the source header.
@@ -53,7 +61,9 @@ pub trait MessageLane: 'static + Clone + Send + Sync {
 }
 
 /// Source header id within given one-way message lane.
-pub type SourceHeaderIdOf<P> = HeaderId<<P as MessageLane>::SourceHeaderHash, <P as MessageLane>::SourceHeaderNumber>;
+pub type SourceHeaderIdOf<P> =
+	HeaderId<<P as MessageLane>::SourceHeaderHash, <P as MessageLane>::SourceHeaderNumber>;
 
 /// Target header id within given one-way message lane.
-pub type TargetHeaderIdOf<P> = HeaderId<<P as MessageLane>::TargetHeaderHash, <P as MessageLane>::TargetHeaderNumber>;
+pub type TargetHeaderIdOf<P> =
+	HeaderId<<P as MessageLane>::TargetHeaderHash, <P as MessageLane>::TargetHeaderNumber>;
diff --git a/polkadot/bridges/relays/messages/src/message_lane_loop.rs b/polkadot/bridges/relays/messages/src/message_lane_loop.rs
index 32c24985a447e4cd58085f14aebf2136cc5256ea..daafea8d68dacbcbb5fd4ebc5aa71c169de81a26 100644
--- a/polkadot/bridges/relays/messages/src/message_lane_loop.rs
+++ b/polkadot/bridges/relays/messages/src/message_lane_loop.rs
@@ -24,15 +24,13 @@
 //! finalized header. I.e. when talking about headers in lane context, we
 //! only care about finalized headers.
 
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use crate::message_race_delivery::run as run_message_delivery_race;
-use crate::message_race_receiving::run as run_message_receiving_race;
-use crate::metrics::MessageLaneLoopMetrics;
+use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration};
 
 use async_trait::async_trait;
+use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt};
+
 use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight};
 use bp_runtime::messages::DispatchFeePayment;
-use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt};
 use relay_utils::{
 	interval,
 	metrics::{GlobalMetrics, MetricsParams},
@@ -40,11 +38,18 @@ use relay_utils::{
 	relay_loop::Client as RelayClient,
 	retry_backoff, FailedClient,
 };
-use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration};
+
+use crate::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_race_delivery::run as run_message_delivery_race,
+	message_race_receiving::run as run_message_receiving_race,
+	metrics::MessageLaneLoopMetrics,
+	relay_strategy::RelayStrategy,
+};
 
 /// Message lane loop configuration params.
 #[derive(Debug, Clone)]
-pub struct Params {
+pub struct Params<Strategy: RelayStrategy> {
 	/// Id of lane this loop is servicing.
 	pub lane: LaneId,
 	/// Interval at which we ask target node about its updates.
@@ -56,7 +61,7 @@ pub struct Params {
 	/// The loop will auto-restart if there has been no updates during this period.
 	pub stall_timeout: Duration,
 	/// Message delivery race parameters.
-	pub delivery_params: MessageDeliveryParams,
+	pub delivery_params: MessageDeliveryParams<Strategy>,
 }
 
 /// Relayer operating mode.
@@ -64,20 +69,22 @@ pub struct Params {
 pub enum RelayerMode {
 	/// The relayer doesn't care about rewards.
 	Altruistic,
-	/// The relayer will deliver all messages and confirmations as long as he's not losing any funds.
-	NoLosses,
+	/// The relayer will deliver all messages and confirmations as long as he's not losing any
+	/// funds.
+	Rational,
 }
 
 /// Message delivery race parameters.
 #[derive(Debug, Clone)]
-pub struct MessageDeliveryParams {
-	/// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number of entries
-	/// in the `InboundLaneData::relayers` set, all new messages will be rejected until reward payment will
-	/// be proved (by including outbound lane state to the message delivery transaction).
+pub struct MessageDeliveryParams<Strategy: RelayStrategy> {
+	/// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number
+	/// of entries in the `InboundLaneData::relayers` set, all new messages will be rejected until
+	/// reward payment will be proved (by including outbound lane state to the message delivery
+	/// transaction).
 	pub max_unrewarded_relayer_entries_at_target: MessageNonce,
-	/// Message delivery race will stop delivering messages if there are `max_unconfirmed_nonces_at_target`
-	/// unconfirmed nonces on the target node. The race would continue once they're confirmed by the
-	/// receiving race.
+	/// Message delivery race will stop delivering messages if there are
+	/// `max_unconfirmed_nonces_at_target` unconfirmed nonces on the target node. The race would
+	/// continue once they're confirmed by the receiving race.
 	pub max_unconfirmed_nonces_at_target: MessageNonce,
 	/// Maximal number of relayed messages in single delivery transaction.
 	pub max_messages_in_single_batch: MessageNonce,
@@ -85,8 +92,8 @@ pub struct MessageDeliveryParams {
 	pub max_messages_weight_in_single_batch: Weight,
 	/// Maximal cumulative size of relayed messages in single delivery transaction.
 	pub max_messages_size_in_single_batch: u32,
-	/// Relayer operating mode.
-	pub relayer_mode: RelayerMode,
+	/// Relay strategy
+	pub relay_strategy: Strategy,
 }
 
 /// Message details.
@@ -103,7 +110,8 @@ pub struct MessageDetails<SourceChainBalance> {
 }
 
 /// Messages details map.
-pub type MessageDetailsMap<SourceChainBalance> = BTreeMap<MessageNonce, MessageDetails<SourceChainBalance>>;
+pub type MessageDetailsMap<SourceChainBalance> =
+	BTreeMap<MessageNonce, MessageDetails<SourceChainBalance>>;
 
 /// Message delivery race proof parameters.
 #[derive(Debug, PartialEq)]
@@ -125,6 +133,7 @@ pub trait SourceClient<P: MessageLane>: RelayClient {
 		&self,
 		id: SourceHeaderIdOf<P>,
 	) -> Result<(SourceHeaderIdOf<P>, MessageNonce), Self::Error>;
+
 	/// Get nonce of the latest message, which receiving has been confirmed by the target chain.
 	async fn latest_confirmed_received_nonce(
 		&self,
@@ -175,11 +184,12 @@ pub trait TargetClient<P: MessageLane>: RelayClient {
 		id: TargetHeaderIdOf<P>,
 	) -> Result<(TargetHeaderIdOf<P>, MessageNonce), Self::Error>;
 
-	/// Get nonce of latest confirmed message.
+	/// Get nonce of the latest confirmed message.
 	async fn latest_confirmed_received_nonce(
 		&self,
 		id: TargetHeaderIdOf<P>,
 	) -> Result<(TargetHeaderIdOf<P>, MessageNonce), Self::Error>;
+
 	/// Get state of unrewarded relayers set at the inbound lane.
 	async fn unrewarded_relayers_state(
 		&self,
@@ -210,19 +220,21 @@ pub trait TargetClient<P: MessageLane>: RelayClient {
 	async fn estimate_delivery_transaction_in_source_tokens(
 		&self,
 		nonces: RangeInclusive<MessageNonce>,
+		total_prepaid_nonces: MessageNonce,
 		total_dispatch_weight: Weight,
 		total_size: u32,
-	) -> P::SourceChainBalance;
+	) -> Result<P::SourceChainBalance, Self::Error>;
 }
 
 /// State of the client.
 #[derive(Clone, Debug, Default, PartialEq)]
 pub struct ClientState<SelfHeaderId, PeerHeaderId> {
-	/// Best header id of this chain.
+	/// The best header id of this chain.
 	pub best_self: SelfHeaderId,
 	/// Best finalized header id of this chain.
 	pub best_finalized_self: SelfHeaderId,
-	/// Best finalized header id of the peer chain read at the best block of this chain (at `best_finalized_self`).
+	/// Best finalized header id of the peer chain read at the best block of this chain (at
+	/// `best_finalized_self`).
 	pub best_finalized_peer_at_best_self: PeerHeaderId,
 }
 
@@ -241,50 +253,49 @@ pub struct ClientsState<P: MessageLane> {
 	pub target: Option<TargetClientState<P>>,
 }
 
-/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs sync loop.
+/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs
+/// sync loop.
 pub fn metrics_prefix<P: MessageLane>(lane: &LaneId) -> String {
-	format!(
-		"{}_to_{}_MessageLane_{}",
-		P::SOURCE_NAME,
-		P::TARGET_NAME,
-		hex::encode(lane)
-	)
+	format!("{}_to_{}_MessageLane_{}", P::SOURCE_NAME, P::TARGET_NAME, hex::encode(lane))
 }
 
 /// Run message lane service loop.
-pub async fn run<P: MessageLane>(
-	params: Params,
+pub async fn run<P: MessageLane, Strategy: RelayStrategy>(
+	params: Params<Strategy>,
 	source_client: impl SourceClient<P>,
 	target_client: impl TargetClient<P>,
 	metrics_params: MetricsParams,
 	exit_signal: impl Future<Output = ()> + Send + 'static,
-) -> Result<(), String> {
+) -> Result<(), relay_utils::Error> {
 	let exit_signal = exit_signal.shared();
 	relay_utils::relay_loop(source_client, target_client)
 		.reconnect_delay(params.reconnect_delay)
 		.with_metrics(Some(metrics_prefix::<P>(&params.lane)), metrics_params)
-		.loop_metric(|registry, prefix| MessageLaneLoopMetrics::new(registry, prefix))?
-		.standalone_metric(|registry, prefix| GlobalMetrics::new(registry, prefix))?
+		.loop_metric(MessageLaneLoopMetrics::new)?
+		.standalone_metric(GlobalMetrics::new)?
 		.expose()
 		.await?
-		.run(
-			metrics_prefix::<P>(&params.lane),
-			move |source_client, target_client, metrics| {
-				run_until_connection_lost(
-					params.clone(),
-					source_client,
-					target_client,
-					metrics,
-					exit_signal.clone(),
-				)
-			},
-		)
+		.run(metrics_prefix::<P>(&params.lane), move |source_client, target_client, metrics| {
+			run_until_connection_lost(
+				params.clone(),
+				source_client,
+				target_client,
+				metrics,
+				exit_signal.clone(),
+			)
+		})
 		.await
 }
 
-/// Run one-way message delivery loop until connection with target or source node is lost, or exit signal is received.
-async fn run_until_connection_lost<P: MessageLane, SC: SourceClient<P>, TC: TargetClient<P>>(
-	params: Params,
+/// Run one-way message delivery loop until connection with target or source node is lost, or exit
+/// signal is received.
+async fn run_until_connection_lost<
+	P: MessageLane,
+	Strategy: RelayStrategy,
+	SC: SourceClient<P>,
+	TC: TargetClient<P>,
+>(
+	params: Params<Strategy>,
 	source_client: SC,
 	target_client: TC,
 	metrics_msg: Option<MessageLaneLoopMetrics>,
@@ -446,11 +457,16 @@ async fn run_until_connection_lost<P: MessageLane, SC: SourceClient<P>, TC: Targ
 
 #[cfg(test)]
 pub(crate) mod tests {
-	use super::*;
+	use std::sync::Arc;
+
 	use futures::stream::StreamExt;
 	use parking_lot::Mutex;
+
 	use relay_utils::{HeaderId, MaybeConnectionError};
-	use std::sync::Arc;
+
+	use crate::relay_strategy::AltruisticStrategy;
+
+	use super::*;
 
 	pub fn header_id(number: TestSourceHeaderNumber) -> TestSourceHeaderId {
 		HeaderId(number, number)
@@ -554,7 +570,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_source_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok(data.source_state.clone())
 		}
@@ -566,7 +582,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_source_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok((id, data.source_latest_generated_nonce))
 		}
@@ -606,11 +622,7 @@ pub(crate) mod tests {
 			nonces: RangeInclusive<MessageNonce>,
 			proof_parameters: MessageProofParameters,
 		) -> Result<
-			(
-				SourceHeaderIdOf<TestMessageLane>,
-				RangeInclusive<MessageNonce>,
-				TestMessagesProof,
-			),
+			(SourceHeaderIdOf<TestMessageLane>, RangeInclusive<MessageNonce>, TestMessagesProof),
 			TestError,
 		> {
 			let mut data = self.data.lock();
@@ -691,7 +703,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok(data.target_state.clone())
 		}
@@ -703,7 +715,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok((id, data.target_latest_received_nonce))
 		}
@@ -729,7 +741,7 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			Ok((id, data.target_latest_confirmed_received_nonce))
 		}
@@ -750,14 +762,15 @@ pub(crate) mod tests {
 			let mut data = self.data.lock();
 			(self.tick)(&mut *data);
 			if data.is_target_fails {
-				return Err(TestError);
+				return Err(TestError)
 			}
 			data.target_state.best_self =
 				HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1);
 			data.target_state.best_finalized_self = data.target_state.best_self;
 			data.target_latest_received_nonce = *proof.0.end();
 			if let Some(target_latest_confirmed_received_nonce) = proof.1 {
-				data.target_latest_confirmed_received_nonce = target_latest_confirmed_received_nonce;
+				data.target_latest_confirmed_received_nonce =
+					target_latest_confirmed_received_nonce;
 			}
 			data.submitted_messages_proofs.push(proof);
 			Ok(nonces)
@@ -773,12 +786,13 @@ pub(crate) mod tests {
 		async fn estimate_delivery_transaction_in_source_tokens(
 			&self,
 			nonces: RangeInclusive<MessageNonce>,
+			_total_prepaid_nonces: MessageNonce,
 			total_dispatch_weight: Weight,
 			total_size: u32,
-		) -> TestSourceChainBalance {
-			BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1)
-				+ total_dispatch_weight
-				+ total_size as TestSourceChainBalance
+		) -> Result<TestSourceChainBalance, TestError> {
+			Ok(BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1) +
+				total_dispatch_weight +
+				total_size as TestSourceChainBalance)
 		}
 	}
 
@@ -791,14 +805,8 @@ pub(crate) mod tests {
 		async_std::task::block_on(async {
 			let data = Arc::new(Mutex::new(data));
 
-			let source_client = TestSourceClient {
-				data: data.clone(),
-				tick: source_tick,
-			};
-			let target_client = TestTargetClient {
-				data: data.clone(),
-				tick: target_tick,
-			};
+			let source_client = TestSourceClient { data: data.clone(), tick: source_tick };
+			let target_client = TestTargetClient { data: data.clone(), tick: target_tick };
 			let _ = run(
 				Params {
 					lane: [0, 0, 0, 0],
@@ -812,7 +820,7 @@ pub(crate) mod tests {
 						max_messages_in_single_batch: 4,
 						max_messages_weight_in_single_batch: 4,
 						max_messages_size_in_single_batch: 4,
-						relayer_mode: RelayerMode::Altruistic,
+						relay_strategy: AltruisticStrategy,
 					},
 				},
 				source_client,
@@ -901,7 +909,10 @@ pub(crate) mod tests {
 				data.source_state.best_finalized_self = data.source_state.best_self;
 				// headers relay must only be started when we need new target headers at source node
 				if data.target_to_source_header_required.is_some() {
-					assert!(data.source_state.best_finalized_peer_at_best_self.0 < data.target_state.best_self.0);
+					assert!(
+						data.source_state.best_finalized_peer_at_best_self.0 <
+							data.target_state.best_self.0
+					);
 					data.target_to_source_header_required = None;
 				}
 				// syncing target headers -> source chain
@@ -918,7 +929,10 @@ pub(crate) mod tests {
 				data.target_state.best_finalized_self = data.target_state.best_self;
 				// headers relay must only be started when we need new source headers at target node
 				if data.source_to_target_header_required.is_some() {
-					assert!(data.target_state.best_finalized_peer_at_best_self.0 < data.source_state.best_self.0);
+					assert!(
+						data.target_state.best_finalized_peer_at_best_self.0 <
+							data.source_state.best_self.0
+					);
 					data.source_to_target_header_required = None;
 				}
 				// syncing source headers -> target chain
diff --git a/polkadot/bridges/relays/messages/src/message_race_delivery.rs b/polkadot/bridges/relays/messages/src/message_race_delivery.rs
index bde09af7068fdc834b965b7bc36a33c9ea3ade4d..1cd2cbd267185c3b7110438dde200c2a25b98239 100644
--- a/polkadot/bridges/relays/messages/src/message_race_delivery.rs
+++ b/polkadot/bridges/relays/messages/src/message_race_delivery.rs
@@ -11,43 +11,41 @@
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 // GNU General Public License for more details.
 
-//! Message delivery race delivers proof-of-messages from lane.source to lane.target.
+//! Message delivery race delivers proof-of-messages from "lane.source" to "lane.target".
 
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use crate::message_lane_loop::{
-	MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, RelayerMode,
-	SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient,
-	TargetClientState,
-};
-use crate::message_race_loop::{
-	MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, TargetClient,
-	TargetClientNonces,
-};
-use crate::message_race_strategy::{BasicStrategy, SourceRangesQueue};
-use crate::metrics::MessageLaneLoopMetrics;
+use std::{collections::VecDeque, marker::PhantomData, ops::RangeInclusive, time::Duration};
 
 use async_trait::async_trait;
-use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight};
-use bp_runtime::messages::DispatchFeePayment;
 use futures::stream::FusedStream;
-use num_traits::{SaturatingAdd, Zero};
+
+use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight};
 use relay_utils::FailedClient;
-use std::{
-	collections::VecDeque,
-	marker::PhantomData,
-	ops::{Range, RangeInclusive},
-	time::Duration,
+
+use crate::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane_loop::{
+		MessageDeliveryParams, MessageDetailsMap, MessageProofParameters,
+		SourceClient as MessageLaneSourceClient, SourceClientState,
+		TargetClient as MessageLaneTargetClient, TargetClientState,
+	},
+	message_race_loop::{
+		MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces,
+		TargetClient, TargetClientNonces,
+	},
+	message_race_strategy::BasicStrategy,
+	metrics::MessageLaneLoopMetrics,
+	relay_strategy::{EnforcementStrategy, RelayMessagesBatchReference, RelayStrategy},
 };
 
 /// Run message delivery race.
-pub async fn run<P: MessageLane>(
+pub async fn run<P: MessageLane, Strategy: RelayStrategy>(
 	source_client: impl MessageLaneSourceClient<P>,
 	source_state_updates: impl FusedStream<Item = SourceClientState<P>>,
 	target_client: impl MessageLaneTargetClient<P>,
 	target_state_updates: impl FusedStream<Item = TargetClientState<P>>,
 	stall_timeout: Duration,
 	metrics_msg: Option<MessageLaneLoopMetrics>,
-	params: MessageDeliveryParams,
+	params: MessageDeliveryParams<Strategy>,
 ) -> Result<(), FailedClient> {
 	crate::message_race_loop::run(
 		MessageDeliveryRaceSource {
@@ -63,15 +61,16 @@ pub async fn run<P: MessageLane>(
 		},
 		target_state_updates,
 		stall_timeout,
-		MessageDeliveryStrategy::<P, _, _> {
+		MessageDeliveryStrategy::<P, Strategy, _, _> {
 			lane_source_client: source_client,
 			lane_target_client: target_client,
-			max_unrewarded_relayer_entries_at_target: params.max_unrewarded_relayer_entries_at_target,
+			max_unrewarded_relayer_entries_at_target: params
+				.max_unrewarded_relayer_entries_at_target,
 			max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target,
 			max_messages_in_single_batch: params.max_messages_in_single_batch,
 			max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch,
 			max_messages_size_in_single_batch: params.max_messages_size_in_single_batch,
-			relayer_mode: params.relayer_mode,
+			relay_strategy: params.relay_strategy,
 			latest_confirmed_nonces_at_source: VecDeque::new(),
 			target_nonces: None,
 			strategy: BasicStrategy::new(),
@@ -121,8 +120,10 @@ where
 		at_block: SourceHeaderIdOf<P>,
 		prev_latest_nonce: MessageNonce,
 	) -> Result<(SourceHeaderIdOf<P>, SourceClientNonces<Self::NoncesRange>), Self::Error> {
-		let (at_block, latest_generated_nonce) = self.client.latest_generated_nonce(at_block).await?;
-		let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?;
+		let (at_block, latest_generated_nonce) =
+			self.client.latest_generated_nonce(at_block).await?;
+		let (at_block, latest_confirmed_nonce) =
+			self.client.latest_confirmed_received_nonce(at_block).await?;
 
 		if let Some(metrics_msg) = self.metrics_msg.as_ref() {
 			metrics_msg.update_source_latest_generated_nonce::<P>(latest_generated_nonce);
@@ -131,7 +132,10 @@ where
 
 		let new_nonces = if latest_generated_nonce > prev_latest_nonce {
 			self.client
-				.generated_message_details(at_block.clone(), prev_latest_nonce + 1..=latest_generated_nonce)
+				.generated_message_details(
+					at_block.clone(),
+					prev_latest_nonce + 1..=latest_generated_nonce,
+				)
 				.await?
 		} else {
 			MessageDetailsMap::new()
@@ -139,10 +143,7 @@ where
 
 		Ok((
 			at_block,
-			SourceClientNonces {
-				new_nonces,
-				confirmed_nonce: Some(latest_confirmed_nonce),
-			},
+			SourceClientNonces { new_nonces, confirmed_nonce: Some(latest_confirmed_nonce) },
 		))
 	}
 
@@ -151,7 +152,8 @@ where
 		at_block: SourceHeaderIdOf<P>,
 		nonces: RangeInclusive<MessageNonce>,
 		proof_parameters: Self::ProofParameters,
-	) -> Result<(SourceHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesProof), Self::Error> {
+	) -> Result<(SourceHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesProof), Self::Error>
+	{
 		self.client.prove_messages(at_block, nonces, proof_parameters).await
 	}
 }
@@ -180,10 +182,13 @@ where
 		&self,
 		at_block: TargetHeaderIdOf<P>,
 		update_metrics: bool,
-	) -> Result<(TargetHeaderIdOf<P>, TargetClientNonces<DeliveryRaceTargetNoncesData>), Self::Error> {
+	) -> Result<(TargetHeaderIdOf<P>, TargetClientNonces<DeliveryRaceTargetNoncesData>), Self::Error>
+	{
 		let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?;
-		let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?;
-		let (at_block, unrewarded_relayers) = self.client.unrewarded_relayers_state(at_block).await?;
+		let (at_block, latest_confirmed_nonce) =
+			self.client.latest_confirmed_received_nonce(at_block).await?;
+		let (at_block, unrewarded_relayers) =
+			self.client.unrewarded_relayers_state(at_block).await?;
 
 		if update_metrics {
 			if let Some(metrics_msg) = self.metrics_msg.as_ref() {
@@ -210,16 +215,14 @@ where
 		nonces: RangeInclusive<MessageNonce>,
 		proof: P::MessagesProof,
 	) -> Result<RangeInclusive<MessageNonce>, Self::Error> {
-		self.client
-			.submit_messages_proof(generated_at_block, nonces, proof)
-			.await
+		self.client.submit_messages_proof(generated_at_block, nonces, proof).await
 	}
 }
 
 /// Additional nonces data from the target client used by message delivery race.
 #[derive(Debug, Clone)]
 struct DeliveryRaceTargetNoncesData {
-	/// Latest nonce that we know: (1) has been delivered to us (2) has been confirmed
+	/// The latest nonce that we know: (1) has been delivered to us (2) has been confirmed
 	/// back to the source node (by confirmations race) and (3) relayer has received
 	/// reward for (and this has been confirmed by the message delivery race).
 	confirmed_nonce: MessageNonce,
@@ -228,7 +231,7 @@ struct DeliveryRaceTargetNoncesData {
 }
 
 /// Messages delivery strategy.
-struct MessageDeliveryStrategy<P: MessageLane, SC, TC> {
+struct MessageDeliveryStrategy<P: MessageLane, Strategy: RelayStrategy, SC, TC> {
 	/// The client that is connected to the message lane source node.
 	lane_source_client: SC,
 	/// The client that is connected to the message lane target node.
@@ -244,8 +247,9 @@ struct MessageDeliveryStrategy<P: MessageLane, SC, TC> {
 	/// Maximal messages size in the single delivery transaction.
 	max_messages_size_in_single_batch: u32,
 	/// Relayer operating mode.
-	relayer_mode: RelayerMode,
-	/// Latest confirmed nonces at the source client + the header id where we have first met this nonce.
+	relay_strategy: Strategy,
+	/// Latest confirmed nonces at the source client + the header id where we have first met this
+	/// nonce.
 	latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf<P>, MessageNonce)>,
 	/// Target nonces from the source client.
 	target_nonces: Option<TargetClientNonces<DeliveryRaceTargetNoncesData>>,
@@ -262,37 +266,27 @@ type MessageDeliveryStrategyBase<P> = BasicStrategy<
 	<P as MessageLane>::MessagesProof,
 >;
 
-impl<P: MessageLane, SC, TC> std::fmt::Debug for MessageDeliveryStrategy<P, SC, TC> {
+impl<P: MessageLane, Strategy: RelayStrategy, SC, TC> std::fmt::Debug
+	for MessageDeliveryStrategy<P, Strategy, SC, TC>
+{
 	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
 		fmt.debug_struct("MessageDeliveryStrategy")
 			.field(
 				"max_unrewarded_relayer_entries_at_target",
 				&self.max_unrewarded_relayer_entries_at_target,
 			)
-			.field(
-				"max_unconfirmed_nonces_at_target",
-				&self.max_unconfirmed_nonces_at_target,
-			)
+			.field("max_unconfirmed_nonces_at_target", &self.max_unconfirmed_nonces_at_target)
 			.field("max_messages_in_single_batch", &self.max_messages_in_single_batch)
-			.field(
-				"max_messages_weight_in_single_batch",
-				&self.max_messages_weight_in_single_batch,
-			)
-			.field(
-				"max_messages_size_in_single_batch",
-				&self.max_messages_size_in_single_batch,
-			)
-			.field(
-				"latest_confirmed_nonces_at_source",
-				&self.latest_confirmed_nonces_at_source,
-			)
+			.field("max_messages_weight_in_single_batch", &self.max_messages_weight_in_single_batch)
+			.field("max_messages_size_in_single_batch", &self.max_messages_size_in_single_batch)
+			.field("latest_confirmed_nonces_at_source", &self.latest_confirmed_nonces_at_source)
 			.field("target_nonces", &self.target_nonces)
 			.field("strategy", &self.strategy)
 			.finish()
 	}
 }
 
-impl<P: MessageLane, SC, TC> MessageDeliveryStrategy<P, SC, TC> {
+impl<P: MessageLane, Strategy: RelayStrategy, SC, TC> MessageDeliveryStrategy<P, Strategy, SC, TC> {
 	/// Returns total weight of all undelivered messages.
 	fn total_queued_dispatch_weight(&self) -> Weight {
 		self.strategy
@@ -304,8 +298,9 @@ impl<P: MessageLane, SC, TC> MessageDeliveryStrategy<P, SC, TC> {
 }
 
 #[async_trait]
-impl<P, SC, TC> RaceStrategy<SourceHeaderIdOf<P>, TargetHeaderIdOf<P>, P::MessagesProof>
-	for MessageDeliveryStrategy<P, SC, TC>
+impl<P, Strategy: RelayStrategy, SC, TC>
+	RaceStrategy<SourceHeaderIdOf<P>, TargetHeaderIdOf<P>, P::MessagesProof>
+	for MessageDeliveryStrategy<P, Strategy, SC, TC>
 where
 	P: MessageLane,
 	SC: MessageLaneSourceClient<P>,
@@ -319,8 +314,12 @@ where
 		self.strategy.is_empty()
 	}
 
-	fn required_source_header_at_target(&self, current_best: &SourceHeaderIdOf<P>) -> Option<SourceHeaderIdOf<P>> {
-		let header_required_for_messages_delivery = self.strategy.required_source_header_at_target(current_best);
+	fn required_source_header_at_target(
+		&self,
+		current_best: &SourceHeaderIdOf<P>,
+	) -> Option<SourceHeaderIdOf<P>> {
+		let header_required_for_messages_delivery =
+			self.strategy.required_source_header_at_target(current_best);
 		let header_required_for_reward_confirmations_delivery =
 			self.latest_confirmed_nonces_at_source.back().map(|(id, _)| id.clone());
 		match (
@@ -371,10 +370,7 @@ where
 		self.target_nonces = Some(target_nonces);
 
 		self.strategy.best_target_nonces_updated(
-			TargetClientNonces {
-				latest_nonce: nonces.latest_nonce,
-				nonces_data: (),
-			},
+			TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () },
 			race_state,
 		)
 	}
@@ -399,14 +395,12 @@ where
 		}
 
 		if let Some(ref mut target_nonces) = self.target_nonces {
-			target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce);
+			target_nonces.latest_nonce =
+				std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce);
 		}
 
 		self.strategy.finalized_target_nonces_updated(
-			TargetClientNonces {
-				latest_nonce: nonces.latest_nonce,
-				nonces_data: (),
-			},
+			TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () },
 			race_state,
 		)
 	}
@@ -428,12 +422,15 @@ where
 		// There's additional condition in the message delivery race: target would reject messages
 		// if there are too much unconfirmed messages at the inbound lane.
 
-		// The receiving race is responsible to deliver confirmations back to the source chain. So if
-		// there's a lot of unconfirmed messages, let's wait until it'll be able to do its job.
+		// The receiving race is responsible to deliver confirmations back to the source chain. So
+		// if there's a lot of unconfirmed messages, let's wait until it'll be able to do its job.
 		let latest_received_nonce_at_target = target_nonces.latest_nonce;
-		let confirmations_missing = latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source);
+		let confirmations_missing =
+			latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source);
 		match confirmations_missing {
-			Some(confirmations_missing) if confirmations_missing >= self.max_unconfirmed_nonces_at_target => {
+			Some(confirmations_missing)
+				if confirmations_missing >= self.max_unconfirmed_nonces_at_target =>
+			{
 				log::debug!(
 					target: "bridge",
 					"Cannot deliver any more messages from {} to {}. Too many unconfirmed nonces \
@@ -445,50 +442,55 @@ where
 					self.max_unconfirmed_nonces_at_target,
 				);
 
-				return None;
-			}
+				return None
+			},
 			_ => (),
 		}
 
-		// Ok - we may have new nonces to deliver. But target may still reject new messages, because we haven't
-		// notified it that (some) messages have been confirmed. So we may want to include updated
-		// `source.latest_confirmed` in the proof.
+		// Ok - we may have new nonces to deliver. But target may still reject new messages, because
+		// we haven't notified it that (some) messages have been confirmed. So we may want to
+		// include updated `source.latest_confirmed` in the proof.
 		//
-		// Important note: we're including outbound state lane proof whenever there are unconfirmed nonces
-		// on the target chain. Other strategy is to include it only if it's absolutely necessary.
+		// Important note: we're including outbound state lane proof whenever there are unconfirmed
+		// nonces on the target chain. Other strategy is to include it only if it's absolutely
+		// necessary.
 		let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce;
-		let outbound_state_proof_required = latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source;
+		let outbound_state_proof_required =
+			latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source;
 
 		// The target node would also reject messages if there are too many entries in the
 		// "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then
 		// we should wait for confirmations race.
 		let unrewarded_relayer_entries_limit_reached =
-			target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries
-				>= self.max_unrewarded_relayer_entries_at_target;
+			target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries >=
+				self.max_unrewarded_relayer_entries_at_target;
 		if unrewarded_relayer_entries_limit_reached {
 			// so there are already too many unrewarded relayer entries in the set
 			//
-			// => check if we can prove enough rewards. If not, we should wait for more rewards to be paid
+			// => check if we can prove enough rewards. If not, we should wait for more rewards to
+			// be paid
 			let number_of_rewards_being_proved =
 				latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target);
-			let enough_rewards_being_proved = number_of_rewards_being_proved
-				>= target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry;
+			let enough_rewards_being_proved = number_of_rewards_being_proved >=
+				target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry;
 			if !enough_rewards_being_proved {
-				return None;
+				return None
 			}
 		}
 
-		// If we're here, then the confirmations race did its job && sending side now knows that messages
-		// have been delivered. Now let's select nonces that we want to deliver.
+		// If we're here, then the confirmations race did its job && sending side now knows that
+		// messages have been delivered. Now let's select nonces that we want to deliver.
 		//
 		// We may deliver at most:
 		//
-		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_target)
+		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target -
+		// latest_confirmed_nonce_at_target)
 		//
-		// messages in the batch. But since we're including outbound state proof in the batch, then it
-		// may be increased to:
+		// messages in the batch. But since we're including outbound state proof in the batch, then
+		// it may be increased to:
 		//
-		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - latest_confirmed_nonce_at_source)
+		// max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target -
+		// latest_confirmed_nonce_at_source)
 		let future_confirmed_nonce_at_target = if outbound_state_proof_required {
 			latest_confirmed_nonce_at_source
 		} else {
@@ -501,24 +503,26 @@ where
 		let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch);
 		let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch;
 		let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch;
-		let relayer_mode = self.relayer_mode;
 		let lane_source_client = self.lane_source_client.clone();
 		let lane_target_client = self.lane_target_client.clone();
 
-		let maximal_source_queue_index = self.strategy.maximal_available_source_queue_index(race_state)?;
+		let maximal_source_queue_index =
+			self.strategy.maximal_available_source_queue_index(race_state)?;
 		let previous_total_dispatch_weight = self.total_queued_dispatch_weight();
 		let source_queue = self.strategy.source_queue();
-		let range_end = select_nonces_for_delivery_transaction(
-			relayer_mode,
-			max_nonces,
+
+		let reference = RelayMessagesBatchReference {
+			max_messages_in_this_batch: max_nonces,
 			max_messages_weight_in_single_batch,
 			max_messages_size_in_single_batch,
-			lane_source_client.clone(),
-			lane_target_client.clone(),
-			source_queue,
-			0..maximal_source_queue_index + 1,
-		)
-		.await?;
+			lane_source_client: lane_source_client.clone(),
+			lane_target_client: lane_target_client.clone(),
+			nonces_queue: source_queue.clone(),
+			nonces_queue_range: 0..maximal_source_queue_index + 1,
+		};
+
+		let strategy = EnforcementStrategy::new(self.relay_strategy.clone());
+		let range_end = strategy.decide(reference).await?;
 
 		let range_begin = source_queue[0].1.begin();
 		let selected_nonces = range_begin..=range_end;
@@ -529,207 +533,11 @@ where
 
 		Some((
 			selected_nonces,
-			MessageProofParameters {
-				outbound_state_proof_required,
-				dispatch_weight,
-			},
+			MessageProofParameters { outbound_state_proof_required, dispatch_weight },
 		))
 	}
 }
 
-/// From given set of source nonces, that are ready to be delivered, select nonces
-/// to fit into single delivery transaction.
-///
-/// The function returns nonces that are NOT selected for current batch and will be
-/// delivered later.
-#[allow(clippy::too_many_arguments)]
-async fn select_nonces_for_delivery_transaction<P: MessageLane>(
-	relayer_mode: RelayerMode,
-	max_messages_in_this_batch: MessageNonce,
-	max_messages_weight_in_single_batch: Weight,
-	max_messages_size_in_single_batch: u32,
-	lane_source_client: impl MessageLaneSourceClient<P>,
-	lane_target_client: impl MessageLaneTargetClient<P>,
-	nonces_queue: &SourceRangesQueue<
-		P::SourceHeaderHash,
-		P::SourceHeaderNumber,
-		MessageDetailsMap<P::SourceChainBalance>,
-	>,
-	nonces_queue_range: Range<usize>,
-) -> Option<MessageNonce> {
-	let mut hard_selected_count = 0;
-	let mut soft_selected_count = 0;
-
-	let mut selected_weight: Weight = 0;
-	let mut selected_unpaid_weight: Weight = 0;
-	let mut selected_size: u32 = 0;
-	let mut selected_count: MessageNonce = 0;
-
-	let mut total_reward = P::SourceChainBalance::zero();
-	let mut total_confirmations_cost = P::SourceChainBalance::zero();
-	let mut total_cost = P::SourceChainBalance::zero();
-
-	// technically, multiple confirmations will be delivered in a single transaction,
-	// meaning less loses for relayer. But here we don't know the final relayer yet, so
-	// we're adding a separate transaction for every message. Normally, this cost is covered
-	// by the message sender. Probably reconsider this?
-	let confirmation_transaction_cost = if relayer_mode != RelayerMode::Altruistic {
-		lane_source_client.estimate_confirmation_transaction().await
-	} else {
-		Zero::zero()
-	};
-
-	let all_ready_nonces = nonces_queue
-		.range(nonces_queue_range.clone())
-		.flat_map(|(_, ready_nonces)| ready_nonces.iter())
-		.enumerate();
-	for (index, (nonce, details)) in all_ready_nonces {
-		// Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch`
-		// and `max_messages_size_in_single_batch`, we may still try to submit transaction
-		// with single message if message overflows these limits. The worst case would be if
-		// transaction will be rejected by the target runtime, but at least we have tried.
-
-		// limit messages in the batch by weight
-		let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) {
-			Some(new_selected_weight) if new_selected_weight <= max_messages_weight_in_single_batch => {
-				new_selected_weight
-			}
-			new_selected_weight if selected_count == 0 => {
-				log::warn!(
-					target: "bridge",
-					"Going to submit message delivery transaction with declared dispatch \
-					weight {:?} that overflows maximal configured weight {}",
-					new_selected_weight,
-					max_messages_weight_in_single_batch,
-				);
-				new_selected_weight.unwrap_or(Weight::MAX)
-			}
-			_ => break,
-		};
-
-		// limit messages in the batch by size
-		let new_selected_size = match selected_size.checked_add(details.size) {
-			Some(new_selected_size) if new_selected_size <= max_messages_size_in_single_batch => new_selected_size,
-			new_selected_size if selected_count == 0 => {
-				log::warn!(
-					target: "bridge",
-					"Going to submit message delivery transaction with message \
-					size {:?} that overflows maximal configured size {}",
-					new_selected_size,
-					max_messages_size_in_single_batch,
-				);
-				new_selected_size.unwrap_or(u32::MAX)
-			}
-			_ => break,
-		};
-
-		// limit number of messages in the batch
-		let new_selected_count = selected_count + 1;
-		if new_selected_count > max_messages_in_this_batch {
-			break;
-		}
-
-		// If dispatch fee has been paid at the source chain, it means that it is **relayer** who's
-		// paying for dispatch at the target chain AND reward must cover this dispatch fee.
-		//
-		// If dispatch fee is paid at the target chain, it means that it'll be withdrawn from the
-		// dispatch origin account AND reward is not covering this fee.
-		//
-		// So in the latter case we're not adding the dispatch weight to the delivery transaction weight.
-		let new_selected_unpaid_weight = match details.dispatch_fee_payment {
-			DispatchFeePayment::AtSourceChain => selected_unpaid_weight.saturating_add(details.dispatch_weight),
-			DispatchFeePayment::AtTargetChain => selected_unpaid_weight,
-		};
-
-		// now the message has passed all 'strong' checks, and we CAN deliver it. But do we WANT
-		// to deliver it? It depends on the relayer strategy.
-		match relayer_mode {
-			RelayerMode::Altruistic => {
-				soft_selected_count = index + 1;
-			}
-			RelayerMode::NoLosses => {
-				let delivery_transaction_cost = lane_target_client
-					.estimate_delivery_transaction_in_source_tokens(
-						0..=(new_selected_count as MessageNonce - 1),
-						new_selected_unpaid_weight,
-						new_selected_size as u32,
-					)
-					.await;
-
-				// if it is the first message that makes reward less than cost, let's log it
-				// if this message makes batch profitable again, let's log it
-				let is_total_reward_less_than_cost = total_reward < total_cost;
-				let prev_total_cost = total_cost;
-				let prev_total_reward = total_reward;
-				total_confirmations_cost = total_confirmations_cost.saturating_add(&confirmation_transaction_cost);
-				total_reward = total_reward.saturating_add(&details.reward);
-				total_cost = total_confirmations_cost.saturating_add(&delivery_transaction_cost);
-				if !is_total_reward_less_than_cost && total_reward < total_cost {
-					log::debug!(
-						target: "bridge",
-						"Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it larger than \
-						total reward {:?}->{:?}",
-						nonce,
-						details.reward,
-						prev_total_cost,
-						total_cost,
-						prev_total_reward,
-						total_reward,
-					);
-				} else if is_total_reward_less_than_cost && total_reward >= total_cost {
-					log::debug!(
-						target: "bridge",
-						"Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it less than or \
-						equal to the total reward {:?}->{:?} (again)",
-						nonce,
-						details.reward,
-						prev_total_cost,
-						total_cost,
-						prev_total_reward,
-						total_reward,
-					);
-				}
-
-				// NoLosses relayer never want to lose his funds
-				if total_reward >= total_cost {
-					soft_selected_count = index + 1;
-				}
-			}
-		}
-
-		hard_selected_count = index + 1;
-		selected_weight = new_selected_weight;
-		selected_unpaid_weight = new_selected_unpaid_weight;
-		selected_size = new_selected_size;
-		selected_count = new_selected_count;
-	}
-
-	let hard_selected_begin_nonce = nonces_queue[nonces_queue_range.start].1.begin();
-	if hard_selected_count != soft_selected_count {
-		let hard_selected_end_nonce = hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1;
-		let soft_selected_begin_nonce = hard_selected_begin_nonce;
-		let soft_selected_end_nonce = soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1;
-		log::warn!(
-			target: "bridge",
-			"Relayer may deliver nonces [{:?}; {:?}], but because of its strategy ({:?}) it has selected \
-			nonces [{:?}; {:?}].",
-			hard_selected_begin_nonce,
-			hard_selected_end_nonce,
-			relayer_mode,
-			soft_selected_begin_nonce,
-			soft_selected_end_nonce,
-		);
-
-		hard_selected_count = soft_selected_count;
-	}
-
-	if hard_selected_count != 0 {
-		Some(hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1)
-	} else {
-		None
-	}
-}
-
 impl<SourceChainBalance: std::fmt::Debug> NoncesRange for MessageDetailsMap<SourceChainBalance> {
 	fn begin(&self) -> MessageNonce {
 		self.keys().next().cloned().unwrap_or_default()
@@ -751,26 +559,32 @@ impl<SourceChainBalance: std::fmt::Debug> NoncesRange for MessageDetailsMap<Sour
 
 #[cfg(test)]
 mod tests {
-	use super::*;
-	use crate::message_lane_loop::{
-		tests::{
-			header_id, TestMessageLane, TestMessagesProof, TestSourceChainBalance, TestSourceClient,
-			TestSourceHeaderId, TestTargetClient, TestTargetHeaderId, BASE_MESSAGE_DELIVERY_TRANSACTION_COST,
-			CONFIRMATION_TRANSACTION_COST,
+	use bp_runtime::messages::DispatchFeePayment;
+
+	use crate::{
+		message_lane_loop::{
+			tests::{
+				header_id, TestMessageLane, TestMessagesProof, TestSourceChainBalance,
+				TestSourceClient, TestSourceHeaderId, TestTargetClient, TestTargetHeaderId,
+				BASE_MESSAGE_DELIVERY_TRANSACTION_COST, CONFIRMATION_TRANSACTION_COST,
+			},
+			MessageDetails, RelayerMode,
 		},
-		MessageDetails,
+		relay_strategy::MixStrategy,
 	};
-	use bp_runtime::messages::DispatchFeePayment::*;
+
+	use super::*;
 
 	const DEFAULT_DISPATCH_WEIGHT: Weight = 1;
 	const DEFAULT_SIZE: u32 = 1;
-	const DEFAULT_REWARD: TestSourceChainBalance = CONFIRMATION_TRANSACTION_COST
-		+ BASE_MESSAGE_DELIVERY_TRANSACTION_COST
-		+ DEFAULT_DISPATCH_WEIGHT
-		+ (DEFAULT_SIZE as TestSourceChainBalance);
+	const DEFAULT_REWARD: TestSourceChainBalance = CONFIRMATION_TRANSACTION_COST +
+		BASE_MESSAGE_DELIVERY_TRANSACTION_COST +
+		DEFAULT_DISPATCH_WEIGHT +
+		(DEFAULT_SIZE as TestSourceChainBalance);
 
 	type TestRaceState = RaceState<TestSourceHeaderId, TestTargetHeaderId, TestMessagesProof>;
-	type TestStrategy = MessageDeliveryStrategy<TestMessageLane, TestSourceClient, TestTargetClient>;
+	type TestStrategy =
+		MessageDeliveryStrategy<TestMessageLane, MixStrategy, TestSourceClient, TestTargetClient>;
 
 	fn source_nonces(
 		new_nonces: RangeInclusive<MessageNonce>,
@@ -809,7 +623,6 @@ mod tests {
 		};
 
 		let mut race_strategy = TestStrategy {
-			relayer_mode: RelayerMode::Altruistic,
 			max_unrewarded_relayer_entries_at_target: 4,
 			max_unconfirmed_nonces_at_target: 4,
 			max_messages_in_single_batch: 4,
@@ -830,16 +643,15 @@ mod tests {
 				},
 			}),
 			strategy: BasicStrategy::new(),
+			relay_strategy: MixStrategy::new(RelayerMode::Altruistic),
 		};
 
-		race_strategy
-			.strategy
-			.source_nonces_updated(header_id(1), source_nonces(20..=23, 19, DEFAULT_REWARD, AtSourceChain));
+		race_strategy.strategy.source_nonces_updated(
+			header_id(1),
+			source_nonces(20..=23, 19, DEFAULT_REWARD, DispatchFeePayment::AtSourceChain),
+		);
 
-		let target_nonces = TargetClientNonces {
-			latest_nonce: 19,
-			nonces_data: (),
-		};
+		let target_nonces = TargetClientNonces { latest_nonce: 19, nonces_data: () };
 		race_strategy
 			.strategy
 			.best_target_nonces_updated(target_nonces.clone(), &mut race_state);
@@ -859,7 +671,9 @@ mod tests {
 
 	#[test]
 	fn weights_map_works_as_nonces_range() {
-		fn build_map(range: RangeInclusive<MessageNonce>) -> MessageDetailsMap<TestSourceChainBalance> {
+		fn build_map(
+			range: RangeInclusive<MessageNonce>,
+		) -> MessageDetailsMap<TestSourceChainBalance> {
 			range
 				.map(|idx| {
 					(
@@ -868,7 +682,7 @@ mod tests {
 							dispatch_weight: idx,
 							size: idx as _,
 							reward: idx as _,
-							dispatch_fee_payment: AtSourceChain,
+							dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
 						},
 					)
 				})
@@ -906,7 +720,8 @@ mod tests {
 		// we need to wait until confirmations will be delivered by receiving race
 		strategy.latest_confirmed_nonces_at_source = vec![(
 			header_id(1),
-			strategy.target_nonces.as_ref().unwrap().latest_nonce - strategy.max_unconfirmed_nonces_at_target,
+			strategy.target_nonces.as_ref().unwrap().latest_nonce -
+				strategy.max_unconfirmed_nonces_at_target,
 		)]
 		.into_iter()
 		.collect();
@@ -914,13 +729,16 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() {
+	async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available()
+	{
 		let (state, mut strategy) = prepare_strategy();
 
 		// if there are new confirmed nonces on source, we want to relay this information
 		// to target to prune rewards queue
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=23), proof_parameters(true, 4)))
@@ -934,8 +752,10 @@ mod tests {
 		// if there are already `max_unrewarded_relayer_entries_at_target` entries at target,
 		// we need to wait until rewards will be paid
 		{
-			let mut unrewarded_relayers = &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers;
-			unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target;
+			let mut unrewarded_relayers =
+				&mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers;
+			unrewarded_relayers.unrewarded_relayer_entries =
+				strategy.max_unrewarded_relayer_entries_at_target;
 			unrewarded_relayers.messages_in_oldest_entry = 4;
 		}
 		assert_eq!(strategy.select_nonces_to_deliver(state).await, None);
@@ -948,12 +768,14 @@ mod tests {
 
 		// if there are already `max_unrewarded_relayer_entries_at_target` entries at target,
 		// we need to prove at least `messages_in_oldest_entry` rewards
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		{
 			let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data;
 			nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
 			let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers;
-			unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target;
+			unrewarded_relayers.unrewarded_relayer_entries =
+				strategy.max_unrewarded_relayer_entries_at_target;
 			unrewarded_relayers.messages_in_oldest_entry = 4;
 		}
 		assert_eq!(strategy.select_nonces_to_deliver(state).await, None);
@@ -965,12 +787,14 @@ mod tests {
 
 		// if there are already `max_unrewarded_relayer_entries_at_target` entries at target,
 		// we need to prove at least `messages_in_oldest_entry` rewards
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		{
 			let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data;
 			nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3;
 			let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers;
-			unrewarded_relayers.unrewarded_relayer_entries = strategy.max_unrewarded_relayer_entries_at_target;
+			unrewarded_relayers.unrewarded_relayer_entries =
+				strategy.max_unrewarded_relayer_entries_at_target;
 			unrewarded_relayers.messages_in_oldest_entry = 3;
 		}
 		assert_eq!(
@@ -992,15 +816,13 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight() {
+	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight(
+	) {
 		let (state, mut strategy) = prepare_strategy();
 
-		// first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4)
-		strategy.strategy.source_queue_mut()[0]
-			.1
-			.get_mut(&20)
-			.unwrap()
-			.dispatch_weight = 10;
+		// first message doesn't fit in the batch, because it has weight (10) that overflows max
+		// weight (4)
+		strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().dispatch_weight = 10;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=20), proof_parameters(false, 10)))
@@ -1020,10 +842,12 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size() {
+	async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size(
+	) {
 		let (state, mut strategy) = prepare_strategy();
 
-		// first message doesn't fit in the batch, because it has weight (10) that overflows max weight (4)
+		// first message doesn't fit in the batch, because it has weight (10) that overflows max
+		// weight (4)
 		strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
@@ -1035,7 +859,8 @@ mod tests {
 	async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() {
 		let (state, mut strategy) = prepare_strategy();
 
-		// not all queued messages may fit in the batch, because batch has max number of messages limit
+		// not all queued messages may fit in the batch, because batch has max number of messages
+		// limit
 		strategy.max_messages_in_single_batch = 3;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
@@ -1044,16 +869,18 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces() {
+	async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces(
+	) {
 		let (state, mut strategy) = prepare_strategy();
 
 		// 1 delivery confirmation from target to source is still missing, so we may only
 		// relay 3 new messages
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
-		strategy.latest_confirmed_nonces_at_source = vec![(header_id(1), prev_confirmed_nonce_at_source - 1)]
-			.into_iter()
-			.collect();
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		strategy.latest_confirmed_nonces_at_source =
+			vec![(header_id(1), prev_confirmed_nonce_at_source - 1)].into_iter().collect();
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=22), proof_parameters(false, 3)))
@@ -1068,30 +895,35 @@ mod tests {
 		//
 		// => so we can't deliver more than 3 messages
 		let (mut state, mut strategy) = prepare_strategy();
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		strategy.latest_confirmed_nonces_at_source = vec![
 			(header_id(1), prev_confirmed_nonce_at_source - 1),
 			(header_id(2), prev_confirmed_nonce_at_source),
 		]
 		.into_iter()
 		.collect();
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(1));
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state).await,
 			Some(((20..=22), proof_parameters(false, 3)))
 		);
 
-		// the same situation, but the header 2 is known to the target node, so we may deliver reward confirmation
+		// the same situation, but the header 2 is known to the target node, so we may deliver
+		// reward confirmation
 		let (mut state, mut strategy) = prepare_strategy();
-		let prev_confirmed_nonce_at_source = strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
+		let prev_confirmed_nonce_at_source =
+			strategy.latest_confirmed_nonces_at_source.back().unwrap().1;
 		strategy.latest_confirmed_nonces_at_source = vec![
 			(header_id(1), prev_confirmed_nonce_at_source - 1),
 			(header_id(2), prev_confirmed_nonce_at_source),
 		]
 		.into_iter()
 		.collect();
-		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1;
+		strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce =
+			prev_confirmed_nonce_at_source - 1;
 		state.best_finalized_source_header_id_at_source = Some(header_id(2));
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(2));
 		assert_eq!(
@@ -1105,8 +937,9 @@ mod tests {
 		// let's prepare situation when:
 		// - all messages [20; 23] have been generated at source block#1;
 		let (mut state, mut strategy) = prepare_strategy();
-		// - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because of unrewarded
-		//   relayers vector capacity;
+		//
+		// - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because
+		//   of unrewarded relayers vector capacity;
 		strategy.max_unconfirmed_nonces_at_target = 2;
 		assert_eq!(
 			strategy.select_nonces_to_deliver(state.clone()).await,
@@ -1127,25 +960,21 @@ mod tests {
 			&mut state,
 		);
 		assert_eq!(strategy.select_nonces_to_deliver(state).await, None);
+		//
 		// - messages [1; 10] receiving confirmation has been delivered at source block#2;
 		strategy.source_nonces_updated(
 			header_id(2),
-			SourceClientNonces {
-				new_nonces: MessageDetailsMap::new(),
-				confirmed_nonce: Some(21),
-			},
+			SourceClientNonces { new_nonces: MessageDetailsMap::new(), confirmed_nonce: Some(21) },
 		);
+		//
 		// - so now we'll need to relay source block#11 to be able to accept messages [11; 20].
-		assert_eq!(
-			strategy.required_source_header_at_target(&header_id(1)),
-			Some(header_id(2))
-		);
+		assert_eq!(strategy.required_source_header_at_target(&header_id(1)), Some(header_id(2)));
 	}
 
 	#[async_std::test]
-	async fn no_losses_relayer_is_delivering_messages_if_cost_is_equal_to_reward() {
+	async fn rational_relayer_is_delivering_messages_if_cost_is_equal_to_reward() {
 		let (state, mut strategy) = prepare_strategy();
-		strategy.relayer_mode = RelayerMode::NoLosses;
+		strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational);
 
 		// so now we have:
 		// - 20..=23 with reward = cost
@@ -1157,17 +986,17 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn no_losses_relayer_is_not_delivering_messages_if_cost_is_larger_than_reward() {
+	async fn rational_relayer_is_not_delivering_messages_if_cost_is_larger_than_reward() {
 		let (mut state, mut strategy) = prepare_strategy();
 		let nonces = source_nonces(
 			24..=25,
 			19,
 			DEFAULT_REWARD - BASE_MESSAGE_DELIVERY_TRANSACTION_COST,
-			AtSourceChain,
+			DispatchFeePayment::AtSourceChain,
 		);
 		strategy.strategy.source_nonces_updated(header_id(2), nonces);
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(2));
-		strategy.relayer_mode = RelayerMode::NoLosses;
+		strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational);
 
 		// so now we have:
 		// - 20..=23 with reward = cost
@@ -1180,7 +1009,7 @@ mod tests {
 	}
 
 	#[async_std::test]
-	async fn no_losses_relayer_is_delivering_unpaid_messages() {
+	async fn rational_relayer_is_delivering_unpaid_messages() {
 		async fn test_with_dispatch_fee_payment(
 			dispatch_fee_payment: DispatchFeePayment,
 		) -> Option<(RangeInclusive<MessageNonce>, MessageProofParameters)> {
@@ -1198,23 +1027,23 @@ mod tests {
 			strategy.max_messages_in_single_batch = 100;
 			strategy.max_messages_weight_in_single_batch = 100;
 			strategy.max_messages_size_in_single_batch = 100;
-			strategy.relayer_mode = RelayerMode::NoLosses;
+			strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational);
 
 			// so now we have:
 			// - 20..=23 with reward = cost
-			// - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT` from the
-			//   cost, so it should be fine;
+			// - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT`
+			//   from the cost, so it should be fine;
 			// => when MSG#24 fee is paid at the target chain, strategy shall select all 20..=24
 			// => when MSG#25 fee is paid at the source chain, strategy shall only select 20..=23
 			strategy.select_nonces_to_deliver(state).await
 		}
 
 		assert_eq!(
-			test_with_dispatch_fee_payment(AtTargetChain).await,
+			test_with_dispatch_fee_payment(DispatchFeePayment::AtTargetChain).await,
 			Some(((20..=24), proof_parameters(false, 5)))
 		);
 		assert_eq!(
-			test_with_dispatch_fee_payment(AtSourceChain).await,
+			test_with_dispatch_fee_payment(DispatchFeePayment::AtSourceChain).await,
 			Some(((20..=23), proof_parameters(false, 4)))
 		);
 	}
@@ -1224,13 +1053,13 @@ mod tests {
 		// Real scenario that has happened on test deployments:
 		// 1) relayer witnessed M1 at block 1 => it has separate entry in the `source_queue`
 		// 2) relayer witnessed M2 at block 2 => it has separate entry in the `source_queue`
-		// 3) if block 2 is known to the target node, then both M1 and M2 are selected for single delivery,
-		//    even though weight(M1+M2) > larger than largest allowed weight
+		// 3) if block 2 is known to the target node, then both M1 and M2 are selected for single
+		// delivery,    even though weight(M1+M2) > larger than largest allowed weight
 		//
-		// This was happening because selector (`select_nonces_for_delivery_transaction`) has been called
-		// for every `source_queue` entry separately without preserving any context.
+		// This was happening because selector (`select_nonces_for_delivery_transaction`) has been
+		// called for every `source_queue` entry separately without preserving any context.
 		let (mut state, mut strategy) = prepare_strategy();
-		let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, AtSourceChain);
+		let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, DispatchFeePayment::AtSourceChain);
 		strategy.strategy.source_nonces_updated(header_id(2), nonces);
 		strategy.max_unrewarded_relayer_entries_at_target = 100;
 		strategy.max_unconfirmed_nonces_at_target = 100;
diff --git a/polkadot/bridges/relays/messages/src/message_race_loop.rs b/polkadot/bridges/relays/messages/src/message_race_loop.rs
index 3b427a2d0e27f28102b27197e35310f26731e284..a7254f70ee4a472757bf0a77eef4cf82fb8924c3 100644
--- a/polkadot/bridges/relays/messages/src/message_race_loop.rs
+++ b/polkadot/bridges/relays/messages/src/message_race_loop.rs
@@ -54,10 +54,12 @@ pub trait MessageRace {
 }
 
 /// State of race source client.
-type SourceClientState<P> = ClientState<<P as MessageRace>::SourceHeaderId, <P as MessageRace>::TargetHeaderId>;
+type SourceClientState<P> =
+	ClientState<<P as MessageRace>::SourceHeaderId, <P as MessageRace>::TargetHeaderId>;
 
 /// State of race target client.
-type TargetClientState<P> = ClientState<<P as MessageRace>::TargetHeaderId, <P as MessageRace>::SourceHeaderId>;
+type TargetClientState<P> =
+	ClientState<<P as MessageRace>::TargetHeaderId, <P as MessageRace>::SourceHeaderId>;
 
 /// Inclusive nonces range.
 pub trait NoncesRange: Debug + Sized {
@@ -76,7 +78,7 @@ pub struct SourceClientNonces<NoncesRange> {
 	/// New nonces range known to the client. `New` here means all nonces generated after
 	/// `prev_latest_nonce` passed to the `SourceClient::nonces` method.
 	pub new_nonces: NoncesRange,
-	/// Latest nonce that is confirmed to the bridged client. This nonce only makes
+	/// The latest nonce that is confirmed to the bridged client. This nonce only makes
 	/// sense in some races. In other races it is `None`.
 	pub confirmed_nonce: Option<MessageNonce>,
 }
@@ -84,7 +86,7 @@ pub struct SourceClientNonces<NoncesRange> {
 /// Nonces on the race target client.
 #[derive(Debug, Clone)]
 pub struct TargetClientNonces<TargetNoncesData> {
-	/// Latest nonce that is known to the target client.
+	/// The latest nonce that is known to the target client.
 	pub latest_nonce: MessageNonce,
 	/// Additional data from target node that may be used by the race.
 	pub nonces_data: TargetNoncesData,
@@ -93,7 +95,7 @@ pub struct TargetClientNonces<TargetNoncesData> {
 /// One of message lane clients, which is source client for the race.
 #[async_trait]
 pub trait SourceClient<P: MessageRace> {
-	/// Type of error this clients returns.
+	/// Type of error these clients returns.
 	type Error: std::fmt::Debug + MaybeConnectionError;
 	/// Type of nonces range returned by the source client.
 	type NoncesRange: NoncesRange;
@@ -118,7 +120,7 @@ pub trait SourceClient<P: MessageRace> {
 /// One of message lane clients, which is target client for the race.
 #[async_trait]
 pub trait TargetClient<P: MessageRace> {
-	/// Type of error this clients returns.
+	/// Type of error these clients returns.
 	type Error: std::fmt::Debug + MaybeConnectionError;
 	/// Type of the additional data from the target client, used by the race.
 	type TargetNoncesData: std::fmt::Debug;
@@ -155,19 +157,26 @@ pub trait RaceStrategy<SourceHeaderId, TargetHeaderId, Proof>: Debug {
 	/// Should return true if nothing has to be synced.
 	fn is_empty(&self) -> bool;
 	/// Return id of source header that is required to be on target to continue synchronization.
-	fn required_source_header_at_target(&self, current_best: &SourceHeaderId) -> Option<SourceHeaderId>;
-	/// Return best nonce at source node.
+	fn required_source_header_at_target(
+		&self,
+		current_best: &SourceHeaderId,
+	) -> Option<SourceHeaderId>;
+	/// Return the best nonce at source node.
 	///
 	/// `Some` is returned only if we are sure that the value is greater or equal
 	/// than the result of `best_at_target`.
 	fn best_at_source(&self) -> Option<MessageNonce>;
-	/// Return best nonce at target node.
+	/// Return the best nonce at target node.
 	///
 	/// May return `None` if value is yet unknown.
 	fn best_at_target(&self) -> Option<MessageNonce>;
 
 	/// Called when nonces are updated at source node of the race.
-	fn source_nonces_updated(&mut self, at_block: SourceHeaderId, nonces: SourceClientNonces<Self::SourceNoncesRange>);
+	fn source_nonces_updated(
+		&mut self,
+		at_block: SourceHeaderId,
+		nonces: SourceClientNonces<Self::SourceNoncesRange>,
+	);
 	/// Called when best nonces are updated at target node of the race.
 	fn best_target_nonces_updated(
 		&mut self,
@@ -197,7 +206,7 @@ pub struct RaceState<SourceHeaderId, TargetHeaderId, Proof> {
 	/// Best finalized source header id at the best block on the target
 	/// client (at the `best_finalized_source_header_id_at_best_target`).
 	pub best_finalized_source_header_id_at_best_target: Option<SourceHeaderId>,
-	/// Best header id at the target client.
+	/// The best header id at the target client.
 	pub best_target_header_id: Option<TargetHeaderId>,
 	/// Best finalized header id at the target client.
 	pub best_finalized_target_header_id: Option<TargetHeaderId>,
@@ -430,8 +439,10 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 				strategy,
 			);
 
-			return Err(FailedClient::Both);
-		} else if race_state.nonces_to_submit.is_none() && race_state.nonces_submitted.is_none() && strategy.is_empty()
+			return Err(FailedClient::Both)
+		} else if race_state.nonces_to_submit.is_none() &&
+			race_state.nonces_submitted.is_none() &&
+			strategy.is_empty()
 		{
 			stall_countdown = Instant::now();
 		}
@@ -439,7 +450,8 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 		if source_client_is_online {
 			source_client_is_online = false;
 
-			let nonces_to_deliver = select_nonces_to_deliver(race_state.clone(), &mut strategy).await;
+			let nonces_to_deliver =
+				select_nonces_to_deliver(race_state.clone(), &mut strategy).await;
 			let best_at_source = strategy.best_at_source();
 
 			if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver {
@@ -451,9 +463,7 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 					at_block,
 				);
 				source_generate_proof.set(
-					race_source
-						.generate_proof(at_block, nonces_range, proof_parameters)
-						.fuse(),
+					race_source.generate_proof(at_block, nonces_range, proof_parameters).fuse(),
 				);
 			} else if source_nonces_required && best_at_source.is_some() {
 				log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name());
@@ -516,7 +526,9 @@ pub async fn run<P: MessageRace, SC: SourceClient<P>, TC: TargetClient<P>>(
 	}
 }
 
-impl<SourceHeaderId, TargetHeaderId, Proof> Default for RaceState<SourceHeaderId, TargetHeaderId, Proof> {
+impl<SourceHeaderId, TargetHeaderId, Proof> Default
+	for RaceState<SourceHeaderId, TargetHeaderId, Proof>
+{
 	fn default() -> Self {
 		RaceState {
 			best_finalized_source_header_id_at_source: None,
@@ -539,7 +551,7 @@ where
 
 	let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10);
 	if !need_update {
-		return prev_time;
+		return prev_time
 	}
 
 	let now_best_nonce_at_source = strategy.best_at_source();
@@ -569,11 +581,7 @@ where
 		.select_nonces_to_deliver(race_state)
 		.await
 		.map(|(nonces_range, proof_parameters)| {
-			(
-				best_finalized_source_header_id_at_best_target,
-				nonces_range,
-				proof_parameters,
-			)
+			(best_finalized_source_header_id_at_best_target, nonces_range, proof_parameters)
 		})
 }
 
@@ -592,8 +600,14 @@ mod tests {
 		// target node only knows about source' BEST_AT_TARGET block
 		// source node has BEST_AT_SOURCE > BEST_AT_TARGET block
 		let mut race_state = RaceState::<_, _, ()> {
-			best_finalized_source_header_id_at_source: Some(HeaderId(BEST_AT_SOURCE, BEST_AT_SOURCE)),
-			best_finalized_source_header_id_at_best_target: Some(HeaderId(BEST_AT_TARGET, BEST_AT_TARGET)),
+			best_finalized_source_header_id_at_source: Some(HeaderId(
+				BEST_AT_SOURCE,
+				BEST_AT_SOURCE,
+			)),
+			best_finalized_source_header_id_at_best_target: Some(HeaderId(
+				BEST_AT_TARGET,
+				BEST_AT_TARGET,
+			)),
 			best_target_header_id: Some(HeaderId(0, 0)),
 			best_finalized_target_header_id: Some(HeaderId(0, 0)),
 			nonces_to_submit: None,
@@ -604,16 +618,10 @@ mod tests {
 		let mut strategy = BasicStrategy::new();
 		strategy.source_nonces_updated(
 			HeaderId(GENERATED_AT, GENERATED_AT),
-			SourceClientNonces {
-				new_nonces: 0..=10,
-				confirmed_nonce: None,
-			},
+			SourceClientNonces { new_nonces: 0..=10, confirmed_nonce: None },
 		);
 		strategy.best_target_nonces_updated(
-			TargetClientNonces {
-				latest_nonce: 5u64,
-				nonces_data: (),
-			},
+			TargetClientNonces { latest_nonce: 5u64, nonces_data: () },
 			&mut race_state,
 		);
 
diff --git a/polkadot/bridges/relays/messages/src/message_race_receiving.rs b/polkadot/bridges/relays/messages/src/message_race_receiving.rs
index 4381b63591f718b8fe8301443bffff153a23232e..5aa36cbd9c6dcf76fe86c1c70479ab5deb55deb6 100644
--- a/polkadot/bridges/relays/messages/src/message_race_receiving.rs
+++ b/polkadot/bridges/relays/messages/src/message_race_receiving.rs
@@ -11,18 +11,21 @@
 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 // GNU General Public License for more details.
 
-//! Message receiving race delivers proof-of-messages-delivery from lane.target to lane.source.
-
-use crate::message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf};
-use crate::message_lane_loop::{
-	SourceClient as MessageLaneSourceClient, SourceClientState, TargetClient as MessageLaneTargetClient,
-	TargetClientState,
-};
-use crate::message_race_loop::{
-	MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, TargetClientNonces,
+//! Message receiving race delivers proof-of-messages-delivery from "lane.target" to "lane.source".
+
+use crate::{
+	message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf},
+	message_lane_loop::{
+		SourceClient as MessageLaneSourceClient, SourceClientState,
+		TargetClient as MessageLaneTargetClient, TargetClientState,
+	},
+	message_race_loop::{
+		MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient,
+		TargetClientNonces,
+	},
+	message_race_strategy::BasicStrategy,
+	metrics::MessageLaneLoopMetrics,
 };
-use crate::message_race_strategy::BasicStrategy;
-use crate::metrics::MessageLaneLoopMetrics;
 
 use async_trait::async_trait;
 use bp_messages::MessageNonce;
@@ -129,11 +132,7 @@ where
 		nonces: RangeInclusive<MessageNonce>,
 		_proof_parameters: Self::ProofParameters,
 	) -> Result<
-		(
-			TargetHeaderIdOf<P>,
-			RangeInclusive<MessageNonce>,
-			P::MessagesReceivingProof,
-		),
+		(TargetHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesReceivingProof),
 		Self::Error,
 	> {
 		self.client
@@ -168,19 +167,14 @@ where
 		at_block: SourceHeaderIdOf<P>,
 		update_metrics: bool,
 	) -> Result<(SourceHeaderIdOf<P>, TargetClientNonces<()>), Self::Error> {
-		let (at_block, latest_confirmed_nonce) = self.client.latest_confirmed_received_nonce(at_block).await?;
+		let (at_block, latest_confirmed_nonce) =
+			self.client.latest_confirmed_received_nonce(at_block).await?;
 		if update_metrics {
 			if let Some(metrics_msg) = self.metrics_msg.as_ref() {
 				metrics_msg.update_source_latest_confirmed_nonce::<P>(latest_confirmed_nonce);
 			}
 		}
-		Ok((
-			at_block,
-			TargetClientNonces {
-				latest_nonce: latest_confirmed_nonce,
-				nonces_data: (),
-			},
-		))
+		Ok((at_block, TargetClientNonces { latest_nonce: latest_confirmed_nonce, nonces_data: () }))
 	}
 
 	async fn submit_proof(
@@ -189,9 +183,7 @@ where
 		nonces: RangeInclusive<MessageNonce>,
 		proof: P::MessagesReceivingProof,
 	) -> Result<RangeInclusive<MessageNonce>, Self::Error> {
-		self.client
-			.submit_messages_receiving_proof(generated_at_block, proof)
-			.await?;
+		self.client.submit_messages_receiving_proof(generated_at_block, proof).await?;
 		Ok(nonces)
 	}
 }
diff --git a/polkadot/bridges/relays/messages/src/message_race_strategy.rs b/polkadot/bridges/relays/messages/src/message_race_strategy.rs
index ff5c1eda012d69801b7d06e41f2937300edea396..4ecf451deb07e0b5d46f171c160267a529480e47 100644
--- a/polkadot/bridges/relays/messages/src/message_race_strategy.rs
+++ b/polkadot/bridges/relays/messages/src/message_race_strategy.rs
@@ -17,7 +17,9 @@
 //! 2) new nonces may be proved to target node (i.e. they have appeared at the
 //!    block, which is known to the target node).
 
-use crate::message_race_loop::{NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces};
+use crate::message_race_loop::{
+	NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces,
+};
 
 use async_trait::async_trait;
 use bp_messages::MessageNonce;
@@ -40,15 +42,29 @@ pub struct BasicStrategy<
 > {
 	/// All queued nonces.
 	source_queue: SourceRangesQueue<SourceHeaderHash, SourceHeaderNumber, SourceNoncesRange>,
-	/// Best nonce known to target node (at its best block). `None` if it has not been received yet.
+	/// The best nonce known to target node (at its best block). `None` if it has not been received
+	/// yet.
 	best_target_nonce: Option<MessageNonce>,
 	/// Unused generic types dump.
 	_phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>,
 }
 
-impl<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-	BasicStrategy<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-where
+impl<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	>
+	BasicStrategy<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	> where
 	SourceHeaderHash: Clone,
 	SourceHeaderNumber: Clone + Ord,
 	SourceNoncesRange: NoncesRange,
@@ -79,9 +95,9 @@ where
 
 	/// Returns index of the latest source queue entry, that may be delivered to the target node.
 	///
-	/// Returns `None` if no entries may be delivered. All entries before and including the `Some(_)`
-	/// index are guaranteed to be witnessed at source blocks that are known to be finalized at the
-	/// target node.
+	/// Returns `None` if no entries may be delivered. All entries before and including the
+	/// `Some(_)` index are guaranteed to be witnessed at source blocks that are known to be
+	/// finalized at the target node.
 	pub fn maximal_available_source_queue_index(
 		&self,
 		race_state: RaceState<
@@ -95,12 +111,12 @@ where
 
 		// if we have already selected nonces that we want to submit, do nothing
 		if race_state.nonces_to_submit.is_some() {
-			return None;
+			return None
 		}
 
 		// if we already submitted some nonces, do nothing
 		if race_state.nonces_submitted.is_some() {
-			return None;
+			return None
 		}
 
 		// 1) we want to deliver all nonces, starting from `target_nonce + 1`
@@ -124,17 +140,34 @@ where
 		while let Some((queued_at, queued_range)) = self.source_queue.pop_front() {
 			if let Some(range_to_requeue) = queued_range.greater_than(nonce) {
 				self.source_queue.push_front((queued_at, range_to_requeue));
-				break;
+				break
 			}
 		}
 	}
 }
 
 #[async_trait]
-impl<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-	RaceStrategy<HeaderId<SourceHeaderHash, SourceHeaderNumber>, HeaderId<TargetHeaderHash, TargetHeaderNumber>, Proof>
-	for BasicStrategy<SourceHeaderNumber, SourceHeaderHash, TargetHeaderNumber, TargetHeaderHash, SourceNoncesRange, Proof>
-where
+impl<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	>
+	RaceStrategy<
+		HeaderId<SourceHeaderHash, SourceHeaderNumber>,
+		HeaderId<TargetHeaderHash, TargetHeaderNumber>,
+		Proof,
+	>
+	for BasicStrategy<
+		SourceHeaderNumber,
+		SourceHeaderHash,
+		TargetHeaderNumber,
+		TargetHeaderHash,
+		SourceNoncesRange,
+		Proof,
+	> where
 	SourceHeaderHash: Clone + Debug + Send,
 	SourceHeaderNumber: Clone + Ord + Debug + Send,
 	SourceNoncesRange: NoncesRange + Debug + Send,
@@ -162,7 +195,8 @@ where
 	fn best_at_source(&self) -> Option<MessageNonce> {
 		let best_in_queue = self.source_queue.back().map(|(_, range)| range.end());
 		match (best_in_queue, self.best_target_nonce) {
-			(Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => Some(best_in_queue),
+			(Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce =>
+				Some(best_in_queue),
 			(_, Some(best_target_nonce)) => Some(best_target_nonce),
 			(_, None) => None,
 		}
@@ -205,18 +239,17 @@ where
 
 		if let Some(best_target_nonce) = self.best_target_nonce {
 			if nonce < best_target_nonce {
-				return;
+				return
 			}
 		}
 
 		while let Some(true) = self.source_queue.front().map(|(_, range)| range.begin() <= nonce) {
-			let maybe_subrange = self
-				.source_queue
-				.pop_front()
-				.and_then(|(at_block, range)| range.greater_than(nonce).map(|subrange| (at_block, subrange)));
+			let maybe_subrange = self.source_queue.pop_front().and_then(|(at_block, range)| {
+				range.greater_than(nonce).map(|subrange| (at_block, subrange))
+			});
 			if let Some((at_block, subrange)) = maybe_subrange {
 				self.source_queue.push_front((at_block, subrange));
-				break;
+				break
 			}
 		}
 
@@ -238,10 +271,8 @@ where
 			race_state.nonces_submitted = None;
 		}
 
-		self.best_target_nonce = Some(std::cmp::max(
-			self.best_target_nonce.unwrap_or(nonces.latest_nonce),
-			nonce,
-		));
+		self.best_target_nonce =
+			Some(std::cmp::max(self.best_target_nonce.unwrap_or(nonces.latest_nonce), nonce));
 	}
 
 	fn finalized_target_nonces_updated(
@@ -278,9 +309,12 @@ where
 #[cfg(test)]
 mod tests {
 	use super::*;
-	use crate::message_lane::MessageLane;
-	use crate::message_lane_loop::tests::{
-		header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, TestSourceHeaderNumber,
+	use crate::{
+		message_lane::MessageLane,
+		message_lane_loop::tests::{
+			header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash,
+			TestSourceHeaderNumber,
+		},
 	};
 
 	type SourceNoncesRange = RangeInclusive<MessageNonce>;
@@ -295,17 +329,11 @@ mod tests {
 	>;
 
 	fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces<SourceNoncesRange> {
-		SourceClientNonces {
-			new_nonces,
-			confirmed_nonce: None,
-		}
+		SourceClientNonces { new_nonces, confirmed_nonce: None }
 	}
 
 	fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> {
-		TargetClientNonces {
-			latest_nonce,
-			nonces_data: (),
-		}
+		TargetClientNonces { latest_nonce, nonces_data: () }
 	}
 
 	#[test]
@@ -420,18 +448,12 @@ mod tests {
 		strategy.source_nonces_updated(header_id(5), source_nonces(7..=8));
 
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(4));
-		assert_eq!(
-			strategy.select_nonces_to_deliver(state.clone()).await,
-			Some((1..=6, ()))
-		);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=6, ())));
 		strategy.best_target_nonces_updated(target_nonces(6), &mut state);
 		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 
 		state.best_finalized_source_header_id_at_best_target = Some(header_id(5));
-		assert_eq!(
-			strategy.select_nonces_to_deliver(state.clone()).await,
-			Some((7..=8, ()))
-		);
+		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((7..=8, ())));
 		strategy.best_target_nonces_updated(target_nonces(8), &mut state);
 		assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None);
 	}
@@ -471,16 +493,17 @@ mod tests {
 		strategy.source_nonces_updated(header_id(3), source_nonces(7..=9));
 
 		fn source_queue_nonces(
-			source_queue: &SourceRangesQueue<TestSourceHeaderHash, TestSourceHeaderNumber, SourceNoncesRange>,
+			source_queue: &SourceRangesQueue<
+				TestSourceHeaderHash,
+				TestSourceHeaderNumber,
+				SourceNoncesRange,
+			>,
 		) -> Vec<MessageNonce> {
 			source_queue.iter().flat_map(|(_, range)| range.clone()).collect()
 		}
 
 		strategy.remove_le_nonces_from_source_queue(1);
-		assert_eq!(
-			source_queue_nonces(&strategy.source_queue),
-			vec![2, 3, 4, 5, 6, 7, 8, 9],
-		);
+		assert_eq!(source_queue_nonces(&strategy.source_queue), vec![2, 3, 4, 5, 6, 7, 8, 9],);
 
 		strategy.remove_le_nonces_from_source_queue(5);
 		assert_eq!(source_queue_nonces(&strategy.source_queue), vec![6, 7, 8, 9]);
diff --git a/polkadot/bridges/relays/messages/src/metrics.rs b/polkadot/bridges/relays/messages/src/metrics.rs
index 51a4118be8582a5a692e6563b14d903e99b678dd..8d6e480722e66b6f04bcdc95368932b154a5fdac 100644
--- a/polkadot/bridges/relays/messages/src/metrics.rs
+++ b/polkadot/bridges/relays/messages/src/metrics.rs
@@ -16,8 +16,10 @@
 
 //! Metrics for message lane relay loop.
 
-use crate::message_lane::MessageLane;
-use crate::message_lane_loop::{SourceClientState, TargetClientState};
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{SourceClientState, TargetClientState},
+};
 
 use bp_messages::MessageNonce;
 use relay_utils::metrics::{metric_name, register, GaugeVec, Opts, PrometheusError, Registry, U64};
@@ -81,28 +83,40 @@ impl MessageLaneLoopMetrics {
 	}
 
 	/// Update latest generated nonce at source.
-	pub fn update_source_latest_generated_nonce<P: MessageLane>(&self, source_latest_generated_nonce: MessageNonce) {
+	pub fn update_source_latest_generated_nonce<P: MessageLane>(
+		&self,
+		source_latest_generated_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["source_latest_generated"])
 			.set(source_latest_generated_nonce);
 	}
 
-	/// Update latest confirmed nonce at source.
-	pub fn update_source_latest_confirmed_nonce<P: MessageLane>(&self, source_latest_confirmed_nonce: MessageNonce) {
+	/// Update the latest confirmed nonce at source.
+	pub fn update_source_latest_confirmed_nonce<P: MessageLane>(
+		&self,
+		source_latest_confirmed_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["source_latest_confirmed"])
 			.set(source_latest_confirmed_nonce);
 	}
 
-	/// Update latest received nonce at target.
-	pub fn update_target_latest_received_nonce<P: MessageLane>(&self, target_latest_generated_nonce: MessageNonce) {
+	/// Update the latest received nonce at target.
+	pub fn update_target_latest_received_nonce<P: MessageLane>(
+		&self,
+		target_latest_generated_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["target_latest_received"])
 			.set(target_latest_generated_nonce);
 	}
 
-	/// Update latest confirmed nonce at target.
-	pub fn update_target_latest_confirmed_nonce<P: MessageLane>(&self, target_latest_confirmed_nonce: MessageNonce) {
+	/// Update the latest confirmed nonce at target.
+	pub fn update_target_latest_confirmed_nonce<P: MessageLane>(
+		&self,
+		target_latest_confirmed_nonce: MessageNonce,
+	) {
 		self.lane_state_nonces
 			.with_label_values(&["target_latest_confirmed"])
 			.set(target_latest_confirmed_nonce);
diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f932b796b0dee1d148c32ac728b541861b791c1c
--- /dev/null
+++ b/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs
@@ -0,0 +1,45 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Altruistic relay strategy
+
+use async_trait::async_trait;
+
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{
+		SourceClient as MessageLaneSourceClient, TargetClient as MessageLaneTargetClient,
+	},
+	relay_strategy::{RelayReference, RelayStrategy},
+};
+
+/// The relayer doesn't care about rewards.
+#[derive(Clone)]
+pub struct AltruisticStrategy;
+
+#[async_trait]
+impl RelayStrategy for AltruisticStrategy {
+	async fn decide<
+		P: MessageLane,
+		SourceClient: MessageLaneSourceClient<P>,
+		TargetClient: MessageLaneTargetClient<P>,
+	>(
+		&self,
+		_reference: &mut RelayReference<P, SourceClient, TargetClient>,
+	) -> bool {
+		true
+	}
+}
diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..042c05bec00aded4585b7b26dd0f3a144e62d766
--- /dev/null
+++ b/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs
@@ -0,0 +1,219 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! enforcement strategy
+
+use num_traits::Zero;
+
+use bp_messages::{MessageNonce, Weight};
+use bp_runtime::messages::DispatchFeePayment;
+
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{
+		MessageDetails, SourceClient as MessageLaneSourceClient,
+		TargetClient as MessageLaneTargetClient,
+	},
+	message_race_loop::NoncesRange,
+	relay_strategy::{RelayMessagesBatchReference, RelayReference, RelayStrategy},
+};
+
+/// Do hard check and run soft check strategy
+#[derive(Clone)]
+pub struct EnforcementStrategy<Strategy: RelayStrategy> {
+	strategy: Strategy,
+}
+
+impl<Strategy: RelayStrategy> EnforcementStrategy<Strategy> {
+	pub fn new(strategy: Strategy) -> Self {
+		Self { strategy }
+	}
+}
+
+impl<Strategy: RelayStrategy> EnforcementStrategy<Strategy> {
+	pub async fn decide<
+		P: MessageLane,
+		SourceClient: MessageLaneSourceClient<P>,
+		TargetClient: MessageLaneTargetClient<P>,
+	>(
+		&self,
+		reference: RelayMessagesBatchReference<P, SourceClient, TargetClient>,
+	) -> Option<MessageNonce> {
+		let mut hard_selected_count = 0;
+		let mut soft_selected_count = 0;
+
+		let mut selected_weight: Weight = 0;
+		let mut selected_count: MessageNonce = 0;
+
+		let hard_selected_begin_nonce =
+			reference.nonces_queue[reference.nonces_queue_range.start].1.begin();
+
+		// relay reference
+		let mut relay_reference = RelayReference {
+			lane_source_client: reference.lane_source_client.clone(),
+			lane_target_client: reference.lane_target_client.clone(),
+
+			selected_reward: P::SourceChainBalance::zero(),
+			selected_cost: P::SourceChainBalance::zero(),
+			selected_size: 0,
+
+			total_reward: P::SourceChainBalance::zero(),
+			total_confirmations_cost: P::SourceChainBalance::zero(),
+			total_cost: P::SourceChainBalance::zero(),
+
+			hard_selected_begin_nonce,
+			selected_prepaid_nonces: 0,
+			selected_unpaid_weight: 0,
+
+			index: 0,
+			nonce: 0,
+			details: MessageDetails {
+				dispatch_weight: 0,
+				size: 0,
+				reward: P::SourceChainBalance::zero(),
+				dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
+			},
+		};
+
+		let all_ready_nonces = reference
+			.nonces_queue
+			.range(reference.nonces_queue_range.clone())
+			.flat_map(|(_, ready_nonces)| ready_nonces.iter())
+			.enumerate();
+		for (index, (nonce, details)) in all_ready_nonces {
+			relay_reference.index = index;
+			relay_reference.nonce = *nonce;
+			relay_reference.details = *details;
+
+			// Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch`
+			// and `max_messages_size_in_single_batch`, we may still try to submit transaction
+			// with single message if message overflows these limits. The worst case would be if
+			// transaction will be rejected by the target runtime, but at least we have tried.
+
+			// limit messages in the batch by weight
+			let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) {
+				Some(new_selected_weight)
+					if new_selected_weight <= reference.max_messages_weight_in_single_batch =>
+					new_selected_weight,
+				new_selected_weight if selected_count == 0 => {
+					log::warn!(
+						target: "bridge",
+						"Going to submit message delivery transaction with declared dispatch \
+						weight {:?} that overflows maximal configured weight {}",
+						new_selected_weight,
+						reference.max_messages_weight_in_single_batch,
+					);
+					new_selected_weight.unwrap_or(Weight::MAX)
+				},
+				_ => break,
+			};
+
+			// limit messages in the batch by size
+			let new_selected_size = match relay_reference.selected_size.checked_add(details.size) {
+				Some(new_selected_size)
+					if new_selected_size <= reference.max_messages_size_in_single_batch =>
+					new_selected_size,
+				new_selected_size if selected_count == 0 => {
+					log::warn!(
+						target: "bridge",
+						"Going to submit message delivery transaction with message \
+						size {:?} that overflows maximal configured size {}",
+						new_selected_size,
+						reference.max_messages_size_in_single_batch,
+					);
+					new_selected_size.unwrap_or(u32::MAX)
+				},
+				_ => break,
+			};
+
+			// limit number of messages in the batch
+			let new_selected_count = selected_count + 1;
+			if new_selected_count > reference.max_messages_in_this_batch {
+				break
+			}
+			relay_reference.selected_size = new_selected_size;
+
+			// If dispatch fee has been paid at the source chain, it means that it is **relayer**
+			// who's paying for dispatch at the target chain AND reward must cover this dispatch
+			// fee.
+			//
+			// If dispatch fee is paid at the target chain, it means that it'll be withdrawn from
+			// the dispatch origin account AND reward is not covering this fee.
+			//
+			// So in the latter case we're not adding the dispatch weight to the delivery
+			// transaction weight.
+			let mut new_selected_prepaid_nonces = relay_reference.selected_prepaid_nonces;
+			let new_selected_unpaid_weight = match details.dispatch_fee_payment {
+				DispatchFeePayment::AtSourceChain => {
+					new_selected_prepaid_nonces += 1;
+					relay_reference.selected_unpaid_weight.saturating_add(details.dispatch_weight)
+				},
+				DispatchFeePayment::AtTargetChain => relay_reference.selected_unpaid_weight,
+			};
+			relay_reference.selected_prepaid_nonces = new_selected_prepaid_nonces;
+			relay_reference.selected_unpaid_weight = new_selected_unpaid_weight;
+
+			// now the message has passed all 'strong' checks, and we CAN deliver it. But do we WANT
+			// to deliver it? It depends on the relayer strategy.
+			if self.strategy.decide(&mut relay_reference).await {
+				soft_selected_count = index + 1;
+			}
+
+			hard_selected_count = index + 1;
+			selected_weight = new_selected_weight;
+			selected_count = new_selected_count;
+		}
+
+		if hard_selected_count != soft_selected_count {
+			let hard_selected_end_nonce =
+				hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1;
+			let soft_selected_begin_nonce = hard_selected_begin_nonce;
+			let soft_selected_end_nonce =
+				soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1;
+			log::warn!(
+				target: "bridge",
+				"Relayer may deliver nonces [{:?}; {:?}], but because of its strategy it has selected \
+				nonces [{:?}; {:?}].",
+				hard_selected_begin_nonce,
+				hard_selected_end_nonce,
+				soft_selected_begin_nonce,
+				soft_selected_end_nonce,
+			);
+
+			hard_selected_count = soft_selected_count;
+		}
+
+		if hard_selected_count != 0 {
+			if relay_reference.selected_reward != P::SourceChainBalance::zero() &&
+				relay_reference.selected_cost != P::SourceChainBalance::zero()
+			{
+				log::trace!(
+					target: "bridge",
+					"Expected reward from delivering nonces [{:?}; {:?}] is: {:?} - {:?} = {:?}",
+					hard_selected_begin_nonce,
+					hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1,
+					&relay_reference.selected_reward,
+					&relay_reference.selected_cost,
+					relay_reference.selected_reward - relay_reference.selected_cost,
+				);
+			}
+
+			Some(hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1)
+		} else {
+			None
+		}
+	}
+}
diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a267d8ca5f5a86e6dfcdfe6c96f88c8b7abc4f70
--- /dev/null
+++ b/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs
@@ -0,0 +1,58 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Adapter for using `enum RelayerMode` in a context which requires `RelayStrategy`.
+
+use async_trait::async_trait;
+
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{
+		RelayerMode, SourceClient as MessageLaneSourceClient,
+		TargetClient as MessageLaneTargetClient,
+	},
+	relay_strategy::{AltruisticStrategy, RationalStrategy, RelayReference, RelayStrategy},
+};
+
+/// `RelayerMode` adapter.
+#[derive(Clone)]
+pub struct MixStrategy {
+	relayer_mode: RelayerMode,
+}
+
+impl MixStrategy {
+	/// Create mix strategy instance
+	pub fn new(relayer_mode: RelayerMode) -> Self {
+		Self { relayer_mode }
+	}
+}
+
+#[async_trait]
+impl RelayStrategy for MixStrategy {
+	async fn decide<
+		P: MessageLane,
+		SourceClient: MessageLaneSourceClient<P>,
+		TargetClient: MessageLaneTargetClient<P>,
+	>(
+		&self,
+		reference: &mut RelayReference<P, SourceClient, TargetClient>,
+	) -> bool {
+		match self.relayer_mode {
+			RelayerMode::Altruistic => AltruisticStrategy.decide(reference).await,
+			RelayerMode::Rational => RationalStrategy.decide(reference).await,
+		}
+	}
+}
diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs b/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3e4eef8975dd866b2eda0cec61c46752f5cd21e2
--- /dev/null
+++ b/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs
@@ -0,0 +1,123 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Relayer strategy
+
+use std::ops::Range;
+
+use async_trait::async_trait;
+
+use bp_messages::{MessageNonce, Weight};
+
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{
+		MessageDetails, MessageDetailsMap, SourceClient as MessageLaneSourceClient,
+		TargetClient as MessageLaneTargetClient,
+	},
+	message_race_strategy::SourceRangesQueue,
+};
+
+pub(crate) use self::enforcement_strategy::*;
+pub use self::{altruistic_strategy::*, mix_strategy::*, rational_strategy::*};
+
+mod altruistic_strategy;
+mod enforcement_strategy;
+mod mix_strategy;
+mod rational_strategy;
+
+/// Relayer strategy trait
+#[async_trait]
+pub trait RelayStrategy: 'static + Clone + Send + Sync {
+	/// The relayer decide how to process nonce by reference.
+	/// From given set of source nonces, that are ready to be delivered, select nonces
+	/// to fit into single delivery transaction.
+	///
+	/// The function returns last nonce that must be delivered to the target chain.
+	async fn decide<
+		P: MessageLane,
+		SourceClient: MessageLaneSourceClient<P>,
+		TargetClient: MessageLaneTargetClient<P>,
+	>(
+		&self,
+		reference: &mut RelayReference<P, SourceClient, TargetClient>,
+	) -> bool;
+}
+
+/// Reference data for participating in relay
+pub struct RelayReference<
+	P: MessageLane,
+	SourceClient: MessageLaneSourceClient<P>,
+	TargetClient: MessageLaneTargetClient<P>,
+> {
+	/// The client that is connected to the message lane source node.
+	pub lane_source_client: SourceClient,
+	/// The client that is connected to the message lane target node.
+	pub lane_target_client: TargetClient,
+	/// Current block reward summary
+	pub selected_reward: P::SourceChainBalance,
+	/// Current block cost summary
+	pub selected_cost: P::SourceChainBalance,
+	/// Messages size summary
+	pub selected_size: u32,
+
+	/// Current block reward summary
+	pub total_reward: P::SourceChainBalance,
+	/// All confirmations cost
+	pub total_confirmations_cost: P::SourceChainBalance,
+	/// Current block cost summary
+	pub total_cost: P::SourceChainBalance,
+
+	/// Hard check begin nonce
+	pub hard_selected_begin_nonce: MessageNonce,
+	/// Count prepaid nonces
+	pub selected_prepaid_nonces: MessageNonce,
+	/// Unpaid nonces weight summary
+	pub selected_unpaid_weight: Weight,
+
+	/// Index by all ready nonces
+	pub index: usize,
+	/// Current nonce
+	pub nonce: MessageNonce,
+	/// Current nonce details
+	pub details: MessageDetails<P::SourceChainBalance>,
+}
+
+/// Relay reference data
+pub struct RelayMessagesBatchReference<
+	P: MessageLane,
+	SourceClient: MessageLaneSourceClient<P>,
+	TargetClient: MessageLaneTargetClient<P>,
+> {
+	/// Maximal number of relayed messages in single delivery transaction.
+	pub max_messages_in_this_batch: MessageNonce,
+	/// Maximal cumulative dispatch weight of relayed messages in single delivery transaction.
+	pub max_messages_weight_in_single_batch: Weight,
+	/// Maximal cumulative size of relayed messages in single delivery transaction.
+	pub max_messages_size_in_single_batch: u32,
+	/// The client that is connected to the message lane source node.
+	pub lane_source_client: SourceClient,
+	/// The client that is connected to the message lane target node.
+	pub lane_target_client: TargetClient,
+	/// Source queue.
+	pub nonces_queue: SourceRangesQueue<
+		P::SourceHeaderHash,
+		P::SourceHeaderNumber,
+		MessageDetailsMap<P::SourceChainBalance>,
+	>,
+	/// Source queue range
+	pub nonces_queue_range: Range<usize>,
+}
diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs
new file mode 100644
index 0000000000000000000000000000000000000000..dc408ffd49e258c32dcfde6ac402aa6814fb2fec
--- /dev/null
+++ b/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs
@@ -0,0 +1,122 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+//! Rational relay strategy
+
+use async_trait::async_trait;
+use num_traits::SaturatingAdd;
+
+use bp_messages::MessageNonce;
+
+use crate::{
+	message_lane::MessageLane,
+	message_lane_loop::{
+		SourceClient as MessageLaneSourceClient, TargetClient as MessageLaneTargetClient,
+	},
+	relay_strategy::{RelayReference, RelayStrategy},
+};
+
+/// The relayer will deliver all messages and confirmations as long as he's not losing any
+/// funds.
+#[derive(Clone)]
+pub struct RationalStrategy;
+
+#[async_trait]
+impl RelayStrategy for RationalStrategy {
+	async fn decide<
+		P: MessageLane,
+		SourceClient: MessageLaneSourceClient<P>,
+		TargetClient: MessageLaneTargetClient<P>,
+	>(
+		&self,
+		reference: &mut RelayReference<P, SourceClient, TargetClient>,
+	) -> bool {
+		// technically, multiple confirmations will be delivered in a single transaction,
+		// meaning less loses for relayer. But here we don't know the final relayer yet, so
+		// we're adding a separate transaction for every message. Normally, this cost is covered
+		// by the message sender. Probably reconsider this?
+		let confirmation_transaction_cost =
+			reference.lane_source_client.estimate_confirmation_transaction().await;
+
+		let delivery_transaction_cost = match reference
+			.lane_target_client
+			.estimate_delivery_transaction_in_source_tokens(
+				reference.hard_selected_begin_nonce..=
+					(reference.hard_selected_begin_nonce + reference.index as MessageNonce),
+				reference.selected_prepaid_nonces,
+				reference.selected_unpaid_weight,
+				reference.selected_size as u32,
+			)
+			.await
+		{
+			Ok(v) => v,
+			Err(err) => {
+				log::debug!(
+					target: "bridge",
+					"Failed to estimate delivery transaction cost: {:?}. No nonces selected for delivery",
+					err,
+				);
+				return false
+			},
+		};
+
+		// if it is the first message that makes reward less than cost, let's log it
+		// if this message makes batch profitable again, let's log it
+		let is_total_reward_less_than_cost = reference.total_reward < reference.total_cost;
+		let prev_total_cost = reference.total_cost;
+		let prev_total_reward = reference.total_reward;
+		reference.total_confirmations_cost = reference
+			.total_confirmations_cost
+			.saturating_add(&confirmation_transaction_cost);
+		reference.total_reward = reference.total_reward.saturating_add(&reference.details.reward);
+		reference.total_cost =
+			reference.total_confirmations_cost.saturating_add(&delivery_transaction_cost);
+		if !is_total_reward_less_than_cost && reference.total_reward < reference.total_cost {
+			log::debug!(
+				target: "bridge",
+				"Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it larger than \
+				total reward {:?}->{:?}",
+				reference.nonce,
+				reference.details.reward,
+				prev_total_cost,
+				reference.total_cost,
+				prev_total_reward,
+				reference.total_reward,
+			);
+		} else if is_total_reward_less_than_cost && reference.total_reward >= reference.total_cost {
+			log::debug!(
+				target: "bridge",
+				"Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it less than or \
+				equal to the total reward {:?}->{:?} (again)",
+				reference.nonce,
+				reference.details.reward,
+				prev_total_cost,
+				reference.total_cost,
+				prev_total_reward,
+				reference.total_reward,
+			);
+		}
+
+		// Rational relayer never want to lose his funds
+		if reference.total_reward >= reference.total_cost {
+			reference.selected_reward = reference.total_reward;
+			reference.selected_cost = reference.total_cost;
+			return true
+		}
+
+		false
+	}
+}
diff --git a/polkadot/bridges/relays/utils/Cargo.toml b/polkadot/bridges/relays/utils/Cargo.toml
index ff80cab5338131ecc0c1becb3edb9c1d327f10ae..a08c3b3d688df07361b0c09872a38858de911c70 100644
--- a/polkadot/bridges/relays/utils/Cargo.toml
+++ b/polkadot/bridges/relays/utils/Cargo.toml
@@ -7,6 +7,7 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
 
 [dependencies]
 ansi_term = "0.12"
+anyhow = "1.0"
 async-std = "1.6.5"
 async-trait = "0.1.40"
 backoff = "0.2"
@@ -19,6 +20,11 @@ num-traits = "0.2"
 serde_json = "1.0"
 sysinfo = "0.15"
 time = "0.2"
+thiserror = "1.0.26"
+
+# Bridge dependencies
+
+bp-runtime = { path = "../../primitives/runtime" }
 
 # Substrate dependencies
 
diff --git a/polkadot/bridges/relays/utils/src/error.rs b/polkadot/bridges/relays/utils/src/error.rs
new file mode 100644
index 0000000000000000000000000000000000000000..26f1d0cacefd8eef5687e0102588f999859012a5
--- /dev/null
+++ b/polkadot/bridges/relays/utils/src/error.rs
@@ -0,0 +1,46 @@
+// Copyright 2019-2021 Parity Technologies (UK) Ltd.
+// This file is part of Parity Bridges Common.
+// Parity Bridges Common is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+// Parity Bridges Common is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+// You should have received a copy of the GNU General Public License
+// along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
+
+use std::net::AddrParseError;
+use thiserror::Error;
+
+/// Result type used by relay utilities.
+pub type Result<T> = std::result::Result<T, Error>;
+
+/// Relay utilities errors.
+#[derive(Error, Debug)]
+pub enum Error {
+	/// Failed to request a float value from HTTP service.
+	#[error("Failed to fetch token price from remote server: {0}")]
+	FetchTokenPrice(#[source] anyhow::Error),
+	/// Failed to parse the response from HTTP service.
+	#[error("Failed to parse HTTP service response: {0:?}. Response: {1:?}")]
+	ParseHttp(serde_json::Error, String),
+	/// Failed to select response value from the Json response.
+	#[error("Failed to select value from response: {0:?}. Response: {1:?}")]
+	SelectResponseValue(jsonpath_lib::JsonPathError, String),
+	/// Failed to parse float value from the selected value.
+	#[error(
+		"Failed to parse float value {0:?} from response. It is assumed to be positive and normal"
+	)]
+	ParseFloat(f64),
+	/// Couldn't found value in the JSON response.
+	#[error("Missing required value from response: {0:?}")]
+	MissingResponseValue(String),
+	/// Invalid host address was used for exposing Prometheus metrics.
+	#[error("Invalid host {0} is used to expose Prometheus metrics: {1}")]
+	ExposingMetricsInvalidHost(String, AddrParseError),
+	/// Prometheus error.
+	#[error("{0}")]
+	Prometheus(#[from] substrate_prometheus_endpoint::prometheus::Error),
+}
diff --git a/polkadot/bridges/relays/utils/src/initialize.rs b/polkadot/bridges/relays/utils/src/initialize.rs
index b87937923bd4e0b70be329cb09190076fbc4a2e2..8c13a4d61cb3a5bc4062cf2ed1373bdb580fada1 100644
--- a/polkadot/bridges/relays/utils/src/initialize.rs
+++ b/polkadot/bridges/relays/utils/src/initialize.rs
@@ -62,14 +62,7 @@ pub fn initialize_logger(with_timestamp: bool) {
 			let log_level = color_level(record.level());
 			let log_target = color_target(record.target());
 
-			writeln!(
-				buf,
-				"{}{} {} {}",
-				loop_name_prefix(),
-				log_level,
-				log_target,
-				record.args(),
-			)
+			writeln!(buf, "{}{} {} {}", loop_name_prefix(), log_level, log_target, record.args(),)
 		});
 	}
 
@@ -81,12 +74,14 @@ pub(crate) fn initialize_loop(loop_name: String) {
 	LOOP_NAME.with(|g_loop_name| *g_loop_name.borrow_mut() = loop_name);
 }
 
-/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop` call.
+/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop`
+/// call.
 fn loop_name_prefix() -> String {
 	// try_with to avoid panic outside of async-std task context
 	LOOP_NAME
 		.try_with(|loop_name| {
-			// using borrow is ok here, because loop is only initialized once (=> borrow_mut will only be called once)
+			// using borrow is ok here, because loop is only initialized once (=> borrow_mut will
+			// only be called once)
 			let loop_name = loop_name.borrow();
 			if loop_name.is_empty() {
 				String::new()
diff --git a/polkadot/bridges/relays/utils/src/lib.rs b/polkadot/bridges/relays/utils/src/lib.rs
index 446e00cd23e672e95d6cdb978c454fb1b1f2f85b..a335be791242cf22ac55f2c9e7273345eacfe044 100644
--- a/polkadot/bridges/relays/utils/src/lib.rs
+++ b/polkadot/bridges/relays/utils/src/lib.rs
@@ -16,11 +16,14 @@
 
 //! Utilities used by different relays.
 
+pub use bp_runtime::HeaderId;
+pub use error::Error;
 pub use relay_loop::{relay_loop, relay_metrics};
 
 use backoff::{backoff::Backoff, ExponentialBackoff};
 use futures::future::FutureExt;
 use std::time::Duration;
+use thiserror::Error;
 
 /// Max delay after connection-unrelated error happened before we'll try the
 /// same request again.
@@ -29,6 +32,7 @@ pub const MAX_BACKOFF_INTERVAL: Duration = Duration::from_secs(60);
 /// reconnection again.
 pub const CONNECTION_ERROR_DELAY: Duration = Duration::from_secs(10);
 
+pub mod error;
 pub mod initialize;
 pub mod metrics;
 pub mod relay_loop;
@@ -100,10 +104,6 @@ macro_rules! bail_on_arg_error {
 	};
 }
 
-/// Ethereum header Id.
-#[derive(Debug, Default, Clone, Copy, Eq, Hash, PartialEq)]
-pub struct HeaderId<Hash, Number>(pub Number, pub Hash);
-
 /// Error type that can signal connection errors.
 pub trait MaybeConnectionError {
 	/// Returns true if error (maybe) represents connection error.
@@ -111,11 +111,13 @@ pub trait MaybeConnectionError {
 }
 
 /// Stringified error that may be either connection-related or not.
-#[derive(Debug)]
+#[derive(Error, Debug)]
 pub enum StringifiedMaybeConnectionError {
 	/// The error is connection-related error.
+	#[error("{0}")]
 	Connection(String),
 	/// The error is connection-unrelated error.
+	#[error("{0}")]
 	NonConnection(String),
 }
 
@@ -139,15 +141,6 @@ impl MaybeConnectionError for StringifiedMaybeConnectionError {
 	}
 }
 
-impl ToString for StringifiedMaybeConnectionError {
-	fn to_string(&self) -> String {
-		match *self {
-			StringifiedMaybeConnectionError::Connection(ref err) => err.clone(),
-			StringifiedMaybeConnectionError::NonConnection(ref err) => err.clone(),
-		}
-	}
-}
-
 /// Exponential backoff for connection-unrelated errors retries.
 pub fn retry_backoff() -> ExponentialBackoff {
 	ExponentialBackoff {
@@ -168,12 +161,12 @@ pub fn format_ids<Id: std::fmt::Debug>(mut ids: impl ExactSizeIterator<Item = Id
 			let id0 = ids.next().expect(NTH_PROOF);
 			let id1 = ids.next().expect(NTH_PROOF);
 			format!("[{:?}, {:?}]", id0, id1)
-		}
+		},
 		len => {
 			let id0 = ids.next().expect(NTH_PROOF);
 			let id_last = ids.last().expect(NTH_PROOF);
 			format!("{}:[{:?} ... {:?}]", len, id0, id_last)
-		}
+		},
 	}
 }
 
@@ -220,7 +213,10 @@ impl ProcessFutureResult {
 	/// Returns Ok(true) if future has succeeded.
 	/// Returns Ok(false) if future has failed with non-connection error.
 	/// Returns Err if future is `ConnectionFailed`.
-	pub fn fail_if_connection_error(self, failed_client: FailedClient) -> Result<bool, FailedClient> {
+	pub fn fail_if_connection_error(
+		self,
+		failed_client: FailedClient,
+	) -> Result<bool, FailedClient> {
 		match self {
 			ProcessFutureResult::Success => Ok(true),
 			ProcessFutureResult::Failed => Ok(false),
@@ -247,7 +243,7 @@ where
 			on_success(result);
 			retry_backoff.reset();
 			ProcessFutureResult::Success
-		}
+		},
 		Err(error) if error.is_connection_error() => {
 			log::error!(
 				target: "bridge",
@@ -259,7 +255,7 @@ where
 			retry_backoff.reset();
 			go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse());
 			ProcessFutureResult::ConnectionFailed
-		}
+		},
 		Err(error) => {
 			let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY);
 			log::error!(
@@ -272,6 +268,6 @@ where
 
 			go_offline_future.set(go_offline(retry_delay).fuse());
 			ProcessFutureResult::Failed
-		}
+		},
 	}
 }
diff --git a/polkadot/bridges/relays/utils/src/metrics.rs b/polkadot/bridges/relays/utils/src/metrics.rs
index c0eaeae337ee10354286b70d59a2cca5c9916fcf..5c796071c6d5b2e2cef7dca5edfece4fdd7ca9fa 100644
--- a/polkadot/bridges/relays/utils/src/metrics.rs
+++ b/polkadot/bridges/relays/utils/src/metrics.rs
@@ -21,12 +21,16 @@ pub use substrate_prometheus_endpoint::{
 	register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, U64,
 };
 
+use async_std::sync::{Arc, RwLock};
 use async_trait::async_trait;
 use std::{fmt::Debug, time::Duration};
 
 mod float_json_value;
 mod global;
 
+/// Shared reference to `f64` value that is updated by the metric.
+pub type F64SharedRef = Arc<RwLock<Option<f64>>>;
+
 /// Unparsed address that needs to be used to expose Prometheus metrics.
 #[derive(Debug, Clone)]
 pub struct MetricsAddress {
@@ -78,21 +82,14 @@ pub trait StandaloneMetrics: Metrics {
 
 impl Default for MetricsAddress {
 	fn default() -> Self {
-		MetricsAddress {
-			host: "127.0.0.1".into(),
-			port: 9616,
-		}
+		MetricsAddress { host: "127.0.0.1".into(), port: 9616 }
 	}
 }
 
 impl MetricsParams {
 	/// Creates metrics params so that metrics are not exposed.
 	pub fn disabled() -> Self {
-		MetricsParams {
-			address: None,
-			registry: None,
-			metrics_prefix: None,
-		}
+		MetricsParams { address: None, registry: None, metrics_prefix: None }
 	}
 
 	/// Do not expose metrics.
@@ -110,11 +107,7 @@ impl MetricsParams {
 
 impl From<Option<MetricsAddress>> for MetricsParams {
 	fn from(address: Option<MetricsAddress>) -> Self {
-		MetricsParams {
-			address,
-			registry: None,
-			metrics_prefix: None,
-		}
+		MetricsParams { address, registry: None, metrics_prefix: None }
 	}
 }
 
@@ -130,7 +123,10 @@ pub fn metric_name(prefix: Option<&str>, name: &str) -> String {
 /// Set value of gauge metric.
 ///
 /// If value is `Ok(None)` or `Err(_)`, metric would have default value.
-pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &Gauge<V>, value: Result<Option<T>, E>) {
+pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(
+	gauge: &Gauge<V>,
+	value: Result<Option<T>, E>,
+) {
 	gauge.set(match value {
 		Ok(Some(value)) => {
 			log::trace!(
@@ -140,7 +136,7 @@ pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &G
 				value,
 			);
 			value
-		}
+		},
 		Ok(None) => {
 			log::warn!(
 				target: "bridge-metrics",
@@ -148,7 +144,7 @@ pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &G
 				gauge.desc().first().map(|d| &d.fq_name),
 			);
 			Default::default()
-		}
+		},
 		Err(error) => {
 			log::warn!(
 				target: "bridge-metrics",
@@ -157,6 +153,6 @@ pub fn set_gauge_value<T: Default + Debug, V: Atomic<T = T>, E: Debug>(gauge: &G
 				error,
 			);
 			Default::default()
-		}
+		},
 	})
 }
diff --git a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs
index d61f9cac7c2247f23fb397d6e7cc8523161a25d0..9404695c1c3055656e74e32ffd0ef48f4538d79f 100644
--- a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs
+++ b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs
@@ -14,8 +14,15 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::metrics::{metric_name, register, Gauge, PrometheusError, Registry, StandaloneMetrics, F64};
-
+use crate::{
+	error::{self, Error},
+	metrics::{
+		metric_name, register, F64SharedRef, Gauge, PrometheusError, Registry, StandaloneMetrics,
+		F64,
+	},
+};
+
+use async_std::sync::{Arc, RwLock};
 use async_trait::async_trait;
 use std::time::Duration;
 
@@ -23,11 +30,15 @@ use std::time::Duration;
 const UPDATE_INTERVAL: Duration = Duration::from_secs(60);
 
 /// Metric that represents float value received from HTTP service as float gauge.
+///
+/// The float value returned by the service is assumed to be normal (`f64::is_normal`
+/// should return `true`) and strictly positive.
 #[derive(Debug, Clone)]
 pub struct FloatJsonValueMetric {
 	url: String,
 	json_path: String,
 	metric: Gauge<F64>,
+	shared_value_ref: F64SharedRef,
 }
 
 impl FloatJsonValueMetric {
@@ -40,34 +51,32 @@ impl FloatJsonValueMetric {
 		name: String,
 		help: String,
 	) -> Result<Self, PrometheusError> {
+		let shared_value_ref = Arc::new(RwLock::new(None));
 		Ok(FloatJsonValueMetric {
 			url,
 			json_path,
 			metric: register(Gauge::new(metric_name(prefix, &name), help)?, registry)?,
+			shared_value_ref,
 		})
 	}
 
-	/// Read value from HTTP service.
-	async fn read_value(&self) -> Result<f64, String> {
+	/// Get shared reference to metric value.
+	pub fn shared_value_ref(&self) -> F64SharedRef {
+		self.shared_value_ref.clone()
+	}
+
+	/// Request value from HTTP service.
+	async fn request_value(&self) -> anyhow::Result<String> {
 		use isahc::{AsyncReadResponseExt, HttpClient, Request};
 
-		fn map_isahc_err(err: impl std::fmt::Display) -> String {
-			format!("Failed to fetch token price from remote server: {}", err)
-		}
-
-		let request = Request::get(&self.url)
-			.header("Accept", "application/json")
-			.body(())
-			.map_err(map_isahc_err)?;
-		let raw_response = HttpClient::new()
-			.map_err(map_isahc_err)?
-			.send_async(request)
-			.await
-			.map_err(map_isahc_err)?
-			.text()
-			.await
-			.map_err(map_isahc_err)?;
+		let request = Request::get(&self.url).header("Accept", "application/json").body(())?;
+		let raw_response = HttpClient::new()?.send_async(request).await?.text().await?;
+		Ok(raw_response)
+	}
 
+	/// Read value from HTTP service.
+	async fn read_value(&self) -> error::Result<f64> {
+		let raw_response = self.request_value().await.map_err(Error::FetchTokenPrice)?;
 		parse_service_response(&self.json_path, &raw_response)
 	}
 }
@@ -79,30 +88,28 @@ impl StandaloneMetrics for FloatJsonValueMetric {
 	}
 
 	async fn update(&self) {
-		crate::metrics::set_gauge_value(&self.metric, self.read_value().await.map(Some));
+		let value = self.read_value().await;
+		let maybe_ok = value.as_ref().ok().copied();
+		crate::metrics::set_gauge_value(&self.metric, value.map(Some));
+		*self.shared_value_ref.write().await = maybe_ok;
 	}
 }
 
 /// Parse HTTP service response.
-fn parse_service_response(json_path: &str, response: &str) -> Result<f64, String> {
-	let json = serde_json::from_str(response).map_err(|err| {
-		format!(
-			"Failed to parse HTTP service response: {:?}. Response: {:?}",
-			err, response,
-		)
-	})?;
+fn parse_service_response(json_path: &str, response: &str) -> error::Result<f64> {
+	let json =
+		serde_json::from_str(response).map_err(|err| Error::ParseHttp(err, response.to_owned()))?;
 
 	let mut selector = jsonpath_lib::selector(&json);
-	let maybe_selected_value = selector(json_path).map_err(|err| {
-		format!(
-			"Failed to select value from response: {:?}. Response: {:?}",
-			err, response,
-		)
-	})?;
+	let maybe_selected_value =
+		selector(json_path).map_err(|err| Error::SelectResponseValue(err, response.to_owned()))?;
 	let selected_value = maybe_selected_value
 		.first()
 		.and_then(|v| v.as_f64())
-		.ok_or_else(|| format!("Missing required value from response: {:?}", response,))?;
+		.ok_or_else(|| Error::MissingResponseValue(response.to_owned()))?;
+	if !selected_value.is_normal() || selected_value < 0.0 {
+		return Err(Error::ParseFloat(selected_value))
+	}
 
 	Ok(selected_value)
 }
@@ -118,4 +125,19 @@ mod tests {
 			Ok(433.05),
 		);
 	}
+
+	#[test]
+	fn parse_service_response_rejects_negative_numbers() {
+		assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":-433.05}}"#).is_err());
+	}
+
+	#[test]
+	fn parse_service_response_rejects_zero_numbers() {
+		assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":0.0}}"#).is_err());
+	}
+
+	#[test]
+	fn parse_service_response_rejects_nan() {
+		assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":NaN}}"#).is_err());
+	}
 }
diff --git a/polkadot/bridges/relays/utils/src/metrics/global.rs b/polkadot/bridges/relays/utils/src/metrics/global.rs
index d212480510448339328494b7b470b12b6fec4fad..7746690a0c72cf9caa6ca75215cb3ffa755e231f 100644
--- a/polkadot/bridges/relays/utils/src/metrics/global.rs
+++ b/polkadot/bridges/relays/utils/src/metrics/global.rs
@@ -17,7 +17,8 @@
 //! Global system-wide Prometheus metrics exposed by relays.
 
 use crate::metrics::{
-	metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics, F64, U64,
+	metric_name, register, Gauge, GaugeVec, Opts, PrometheusError, Registry, StandaloneMetrics,
+	F64, U64,
 };
 
 use async_std::sync::{Arc, Mutex};
@@ -50,7 +51,10 @@ impl GlobalMetrics {
 				registry,
 			)?,
 			process_cpu_usage_percentage: register(
-				Gauge::new(metric_name(prefix, "process_cpu_usage_percentage"), "Process CPU usage")?,
+				Gauge::new(
+					metric_name(prefix, "process_cpu_usage_percentage"),
+					"Process CPU usage",
+				)?,
 				registry,
 			)?,
 			process_memory_usage_bytes: register(
@@ -92,16 +96,19 @@ impl StandaloneMetrics for GlobalMetrics {
 					memory_usage,
 				);
 
-				self.process_cpu_usage_percentage
-					.set(if cpu_usage.is_finite() { cpu_usage } else { 0f64 });
+				self.process_cpu_usage_percentage.set(if cpu_usage.is_finite() {
+					cpu_usage
+				} else {
+					0f64
+				});
 				self.process_memory_usage_bytes.set(memory_usage);
-			}
+			},
 			_ => {
 				log::warn!(
 					target: "bridge-metrics",
 					"Failed to refresh process information. Metrics may show obsolete values",
 				);
-			}
+			},
 		}
 	}
 
diff --git a/polkadot/bridges/relays/utils/src/relay_loop.rs b/polkadot/bridges/relays/utils/src/relay_loop.rs
index 938136658bd31bf5890b4daf47ea3c130fd3f5be..4898185a150b8a56388e6fc9e2fc2a6f74c6601f 100644
--- a/polkadot/bridges/relays/utils/src/relay_loop.rs
+++ b/polkadot/bridges/relays/utils/src/relay_loop.rs
@@ -14,8 +14,11 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity Bridges Common.  If not, see <http://www.gnu.org/licenses/>.
 
-use crate::metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics};
-use crate::{FailedClient, MaybeConnectionError};
+use crate::{
+	error::Error,
+	metrics::{Metrics, MetricsAddress, MetricsParams, PrometheusError, StandaloneMetrics},
+	FailedClient, MaybeConnectionError,
+};
 
 use async_trait::async_trait;
 use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration};
@@ -27,24 +30,29 @@ pub const RECONNECT_DELAY: Duration = Duration::from_secs(10);
 /// Basic blockchain client from relay perspective.
 #[async_trait]
 pub trait Client: 'static + Clone + Send + Sync {
-	/// Type of error this clients returns.
+	/// Type of error these clients returns.
 	type Error: 'static + Debug + MaybeConnectionError + Send + Sync;
 
 	/// Try to reconnect to source node.
 	async fn reconnect(&mut self) -> Result<(), Self::Error>;
 }
 
+#[async_trait]
+impl Client for () {
+	type Error = crate::StringifiedMaybeConnectionError;
+
+	async fn reconnect(&mut self) -> Result<(), Self::Error> {
+		Ok(())
+	}
+}
+
 /// Returns generic loop that may be customized and started.
 pub fn relay_loop<SC, TC>(source_client: SC, target_client: TC) -> Loop<SC, TC, ()> {
-	Loop {
-		reconnect_delay: RECONNECT_DELAY,
-		source_client,
-		target_client,
-		loop_metric: None,
-	}
+	Loop { reconnect_delay: RECONNECT_DELAY, source_client, target_client, loop_metric: None }
 }
 
-/// Returns generic relay loop metrics that may be customized and used in one or several relay loops.
+/// Returns generic relay loop metrics that may be customized and used in one or several relay
+/// loops.
 pub fn relay_metrics(prefix: Option<String>, params: MetricsParams) -> LoopMetrics<(), (), ()> {
 	LoopMetrics {
 		relay_loop: Loop {
@@ -85,7 +93,11 @@ impl<SC, TC, LM> Loop<SC, TC, LM> {
 	}
 
 	/// Start building loop metrics using given prefix.
-	pub fn with_metrics(self, prefix: Option<String>, params: MetricsParams) -> LoopMetrics<SC, TC, ()> {
+	pub fn with_metrics(
+		self,
+		prefix: Option<String>,
+		params: MetricsParams,
+	) -> LoopMetrics<SC, TC, ()> {
 		LoopMetrics {
 			relay_loop: Loop {
 				reconnect_delay: self.reconnect_delay,
@@ -102,10 +114,10 @@ impl<SC, TC, LM> Loop<SC, TC, LM> {
 
 	/// Run relay loop.
 	///
-	/// This function represents an outer loop, which in turn calls provided `run_loop` function to do
-	/// actual job. When `run_loop` returns, this outer loop reconnects to failed client (source,
+	/// This function represents an outer loop, which in turn calls provided `run_loop` function to
+	/// do actual job. When `run_loop` returns, this outer loop reconnects to failed client (source,
 	/// target or both) and calls `run_loop` again.
-	pub async fn run<R, F>(mut self, loop_name: String, run_loop: R) -> Result<(), String>
+	pub async fn run<R, F>(mut self, loop_name: String, run_loop: R) -> Result<(), Error>
 	where
 		R: 'static + Send + Fn(SC, TC, Option<LM>) -> F,
 		F: 'static + Send + Future<Output = Result<(), FailedClient>>,
@@ -118,20 +130,20 @@ impl<SC, TC, LM> Loop<SC, TC, LM> {
 
 			loop {
 				let loop_metric = self.loop_metric.clone();
-				let future_result = run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric);
+				let future_result =
+					run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric);
 				let result = future_result.await;
 
 				match result {
 					Ok(()) => break,
-					Err(failed_client) => {
+					Err(failed_client) =>
 						reconnect_failed_client(
 							failed_client,
 							self.reconnect_delay,
 							&mut self.source_client,
 							&mut self.target_client,
 						)
-						.await
-					}
+						.await,
 				}
 
 				log::debug!(target: "bridge", "Restarting relay loop");
@@ -151,8 +163,8 @@ impl<SC, TC, LM> LoopMetrics<SC, TC, LM> {
 	pub fn loop_metric<NewLM: Metrics>(
 		self,
 		create_metric: impl FnOnce(&Registry, Option<&str>) -> Result<NewLM, PrometheusError>,
-	) -> Result<LoopMetrics<SC, TC, NewLM>, String> {
-		let loop_metric = create_metric(&self.registry, self.metrics_prefix.as_deref()).map_err(|e| e.to_string())?;
+	) -> Result<LoopMetrics<SC, TC, NewLM>, Error> {
+		let loop_metric = create_metric(&self.registry, self.metrics_prefix.as_deref())?;
 
 		Ok(LoopMetrics {
 			relay_loop: self.relay_loop,
@@ -167,13 +179,13 @@ impl<SC, TC, LM> LoopMetrics<SC, TC, LM> {
 	pub fn standalone_metric<M: StandaloneMetrics>(
 		self,
 		create_metric: impl FnOnce(&Registry, Option<&str>) -> Result<M, PrometheusError>,
-	) -> Result<Self, String> {
-		// since standalone metrics are updating themselves, we may just ignore the fact that the same
-		// standalone metric is exposed by several loops && only spawn single metric
+	) -> Result<Self, Error> {
+		// since standalone metrics are updating themselves, we may just ignore the fact that the
+		// same standalone metric is exposed by several loops && only spawn single metric
 		match create_metric(&self.registry, self.metrics_prefix.as_deref()) {
 			Ok(standalone_metrics) => standalone_metrics.spawn(),
 			Err(PrometheusError::AlreadyReg) => (),
-			Err(e) => return Err(e.to_string()),
+			Err(e) => return Err(e.into()),
 		}
 
 		Ok(self)
@@ -191,15 +203,13 @@ impl<SC, TC, LM> LoopMetrics<SC, TC, LM> {
 	/// Expose metrics using address passed at creation.
 	///
 	/// If passed `address` is `None`, metrics are not exposed.
-	pub async fn expose(self) -> Result<Loop<SC, TC, LM>, String> {
+	pub async fn expose(self) -> Result<Loop<SC, TC, LM>, Error> {
 		if let Some(address) = self.address {
 			let socket_addr = SocketAddr::new(
-				address.host.parse().map_err(|err| {
-					format!(
-						"Invalid host {} is used to expose Prometheus metrics: {}",
-						address.host, err,
-					)
-				})?,
+				address
+					.host
+					.parse()
+					.map_err(|err| Error::ExposingMetricsInvalidHost(address.host.clone(), err))?,
 				address.port,
 			);
 
@@ -242,8 +252,8 @@ pub async fn reconnect_failed_client(
 						reconnect_delay.as_secs(),
 						error,
 					);
-					continue;
-				}
+					continue
+				},
 			}
 		}
 		if failed_client == FailedClient::Both || failed_client == FailedClient::Target {
@@ -256,12 +266,12 @@ pub async fn reconnect_failed_client(
 						reconnect_delay.as_secs(),
 						error,
 					);
-					continue;
-				}
+					continue
+				},
 			}
 		}
 
-		break;
+		break
 	}
 }
 
@@ -270,8 +280,9 @@ fn create_metrics_registry(prefix: Option<String>) -> Registry {
 	match prefix {
 		Some(prefix) => {
 			assert!(!prefix.is_empty(), "Metrics prefix can not be empty");
-			Registry::new_custom(Some(prefix), None).expect("only fails if prefix is empty; prefix is not empty; qed")
-		}
+			Registry::new_custom(Some(prefix), None)
+				.expect("only fails if prefix is empty; prefix is not empty; qed")
+		},
 		None => Registry::new(),
 	}
 }
diff --git a/polkadot/bridges/rustfmt.toml b/polkadot/bridges/rustfmt.toml
index 8ded863e80af2390432ee5db3b9f65848f3eefad..082150daf04ee39ada660c315fd0f5bbcf99dea0 100644
--- a/polkadot/bridges/rustfmt.toml
+++ b/polkadot/bridges/rustfmt.toml
@@ -1,3 +1,24 @@
+# Basic
 hard_tabs = true
-max_width = 120
-edition = "2018"
+max_width = 100
+use_small_heuristics = "Max"
+# Imports
+imports_granularity = "Crate"
+reorder_imports = true
+# Consistency
+newline_style = "Unix"
+# Format comments
+comment_width = 100
+wrap_comments = true
+# Misc
+chain_width = 80
+spaces_around_ranges = false
+binop_separator = "Back"
+reorder_impl_items = false
+match_arm_leading_pipes = "Preserve"
+match_arm_blocks = false
+match_block_trailing_comma = true
+trailing_comma = "Vertical"
+trailing_semicolon = false
+use_field_init_shorthand = true
+
diff --git a/polkadot/bridges/scripts/send-message-from-millau-rialto.sh b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh
index 10fe24087fa40fa89bbeeb23e2567f71ed886e4d..d14b08021ee158ec316afdb1113d9bfb23540f4a 100755
--- a/polkadot/bridges/scripts/send-message-from-millau-rialto.sh
+++ b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh
@@ -11,7 +11,7 @@ MILLAU_PORT="${RIALTO_PORT:-9945}"
 case "$1" in
 	remark)
 		RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
-		./target/debug/substrate-relay send-message MillauToRialto \
+		./target/debug/substrate-relay send-message millau-to-rialto \
 			--source-host localhost \
 			--source-port $MILLAU_PORT \
 			--source-signer //Alice \
@@ -22,7 +22,7 @@ case "$1" in
 		;;
 	transfer)
 		RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
-		./target/debug/substrate-relay send-message MillauToRialto \
+		./target/debug/substrate-relay send-message millau-to-rialto \
 			--source-host localhost \
 			--source-port $MILLAU_PORT \
 			--source-signer //Alice \
diff --git a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh
index 52d19e3af8839cf179544cc7aa8e8531b7a92265..10582aa6b3a725ff1101736253209c33773df79c 100755
--- a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh
+++ b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh
@@ -11,7 +11,7 @@ RIALTO_PORT="${RIALTO_PORT:-9944}"
 case "$1" in
 	remark)
 		RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
-		./target/debug/substrate-relay send-message RialtoToMillau \
+		./target/debug/substrate-relay send-message rialto-to-millau \
 			--source-host localhost \
 			--source-port $RIALTO_PORT \
 			--target-signer //Alice \
@@ -22,7 +22,7 @@ case "$1" in
 		;;
 	transfer)
 		RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \
-		./target/debug/substrate-relay send-message RialtoToMillau \
+		./target/debug/substrate-relay send-message rialto-to-millau \
 			--source-host localhost \
 			--source-port $RIALTO_PORT \
 			--target-signer //Alice \
diff --git a/polkadot/bridges/scripts/update-weights.sh b/polkadot/bridges/scripts/update-weights.sh
index 0ac773e8d7b46d2f4c9dd5b3ed65d1171c459831..5ee7bb9e8d8e1c42a24ffdeead289441569c8315 100755
--- a/polkadot/bridges/scripts/update-weights.sh
+++ b/polkadot/bridges/scripts/update-weights.sh
@@ -29,3 +29,15 @@ time cargo run --release -p rialto-bridge-node --features=runtime-benchmarks --
 	--heap-pages=4096 \
 	--output=./modules/grandpa/src/weights.rs \
 	--template=./.maintain/rialto-weight-template.hbs
+
+time cargo run --release -p millau-bridge-node --features=runtime-benchmarks -- benchmark \
+	--chain=dev \
+	--steps=50 \
+	--repeat=20 \
+	--pallet=pallet_bridge_token_swap \
+	--extrinsic=* \
+	--execution=wasm \
+	--wasm-execution=Compiled \
+	--heap-pages=4096 \
+	--output=./modules/token-swap/src/weights.rs \
+	--template=./.maintain/millau-weight-template.hbs